From c800ffed2fc37824b5dff1708c72eaf0abe4fece Mon Sep 17 00:00:00 2001 From: Tim de Groot Date: Thu, 12 Mar 2026 00:50:35 +0100 Subject: [PATCH 1/8] Add unit tests for RobotDashboard and ApiServer - Created `test_robotdashboard.py` to cover various functionalities of the RobotDashboard class, including initialization, output processing, and dashboard creation. - Implemented tests for handling outputs, including error scenarios for invalid XML and duplicate logs. - Added tests for the `print_runs`, `remove_outputs`, and `create_dashboard` methods to ensure correct behavior under different configurations. - Developed `test_server.py` to test the ApiServer class and its FastAPI endpoints, utilizing MagicMock for the RobotDashboard instance. - Covered authentication, output addition, log management, and error handling in the server tests. - Ensured comprehensive coverage of both successful and failure scenarios for all endpoints. --- .github/skills/unit-tests.md | 74 ++ .github/workflows/tests.yml | 28 + .gitignore | 1 + .../keywords/general-keywords.resource | 2 +- atest/testsuites/00_cli.robot | 2 +- pyproject.toml | 4 + requirements-dev.txt | 5 +- robotframework_dashboard/abstractdb.py | 22 +- robotframework_dashboard/database.py | 6 + robotframework_dashboard/dependencies.py | 2 +- robotframework_dashboard/main.py | 2 +- robotframework_dashboard/server.py | 12 +- scripts/unittests.bat | 3 + scripts/unittests.sh | 5 + .../outputs/log-20250313-002134.html | 0 .../outputs/log-20250313-002151.html | 0 .../outputs/log-20250313-002222.html | 0 .../outputs/log-20250313-002257.html | 0 .../outputs/log-20250313-002338.html | 0 .../outputs/log-20250313-002400.html | 0 .../outputs/log-20250313-002431.html | 0 .../outputs/log-20250313-002457.html | 0 .../outputs/log-20250313-002528.html | 0 .../outputs/log-20250313-002549.html | 0 .../outputs/log-20250313-002636.html | 0 .../outputs/log-20250313-002703.html | 0 .../outputs/log-20250313-002739.html | 0 
.../outputs/log-20250313-002915.html | 0 .../outputs/log-20250313-003006.html | 0 .../outputs/output-20250313-002134.xml | 0 .../outputs/output-20250313-002151.xml | 0 .../outputs/output-20250313-002222.xml | 0 .../outputs/output-20250313-002257.xml | 0 .../outputs/output-20250313-002338.xml | 0 .../outputs/output-20250313-002400.xml | 0 .../outputs/output-20250313-002431.xml | 0 .../outputs/output-20250313-002457.xml | 0 .../outputs/output-20250313-002528.xml | 0 .../outputs/output-20250313-002549.xml | 0 .../outputs/output-20250313-002636.xml | 0 .../outputs/output-20250313-002703.xml | 0 .../outputs/output-20250313-002739.xml | 0 .../outputs/output-20250313-002915.xml | 0 .../outputs/output-20250313-003006.xml | 0 tests/conftest.py | 60 ++ tests/test_arguments.py | 380 +++++++ tests/test_dashboard.py | 192 ++++ tests/test_database.py | 527 ++++++++++ tests/test_dependencies.py | 105 ++ tests/test_main.py | 101 ++ tests/test_processors.py | 415 ++++++++ tests/test_robotdashboard.py | 244 +++++ tests/test_server.py | 933 ++++++++++++++++++ 53 files changed, 3104 insertions(+), 21 deletions(-) create mode 100644 .github/skills/unit-tests.md create mode 100644 scripts/unittests.bat create mode 100644 scripts/unittests.sh rename {atest/resources => testdata}/outputs/log-20250313-002134.html (100%) rename {atest/resources => testdata}/outputs/log-20250313-002151.html (100%) rename {atest/resources => testdata}/outputs/log-20250313-002222.html (100%) rename {atest/resources => testdata}/outputs/log-20250313-002257.html (100%) rename {atest/resources => testdata}/outputs/log-20250313-002338.html (100%) rename {atest/resources => testdata}/outputs/log-20250313-002400.html (100%) rename {atest/resources => testdata}/outputs/log-20250313-002431.html (100%) rename {atest/resources => testdata}/outputs/log-20250313-002457.html (100%) rename {atest/resources => testdata}/outputs/log-20250313-002528.html (100%) rename {atest/resources => 
testdata}/outputs/log-20250313-002549.html (100%) rename {atest/resources => testdata}/outputs/log-20250313-002636.html (100%) rename {atest/resources => testdata}/outputs/log-20250313-002703.html (100%) rename {atest/resources => testdata}/outputs/log-20250313-002739.html (100%) rename {atest/resources => testdata}/outputs/log-20250313-002915.html (100%) rename {atest/resources => testdata}/outputs/log-20250313-003006.html (100%) rename {atest/resources => testdata}/outputs/output-20250313-002134.xml (100%) rename {atest/resources => testdata}/outputs/output-20250313-002151.xml (100%) rename {atest/resources => testdata}/outputs/output-20250313-002222.xml (100%) rename {atest/resources => testdata}/outputs/output-20250313-002257.xml (100%) rename {atest/resources => testdata}/outputs/output-20250313-002338.xml (100%) rename {atest/resources => testdata}/outputs/output-20250313-002400.xml (100%) rename {atest/resources => testdata}/outputs/output-20250313-002431.xml (100%) rename {atest/resources => testdata}/outputs/output-20250313-002457.xml (100%) rename {atest/resources => testdata}/outputs/output-20250313-002528.xml (100%) rename {atest/resources => testdata}/outputs/output-20250313-002549.xml (100%) rename {atest/resources => testdata}/outputs/output-20250313-002636.xml (100%) rename {atest/resources => testdata}/outputs/output-20250313-002703.xml (100%) rename {atest/resources => testdata}/outputs/output-20250313-002739.xml (100%) rename {atest/resources => testdata}/outputs/output-20250313-002915.xml (100%) rename {atest/resources => testdata}/outputs/output-20250313-003006.xml (100%) create mode 100644 tests/conftest.py create mode 100644 tests/test_arguments.py create mode 100644 tests/test_dashboard.py create mode 100644 tests/test_database.py create mode 100644 tests/test_dependencies.py create mode 100644 tests/test_main.py create mode 100644 tests/test_processors.py create mode 100644 tests/test_robotdashboard.py create mode 100644 
tests/test_server.py diff --git a/.github/skills/unit-tests.md b/.github/skills/unit-tests.md new file mode 100644 index 00000000..7e75cf32 --- /dev/null +++ b/.github/skills/unit-tests.md @@ -0,0 +1,74 @@ +--- +description: Use when working on, running, or reasoning about the Python unit tests in tests/. +--- + +# Unit Tests + +## How to run + +**Windows:** +```bat +scripts\unittests.bat +``` + +**Linux / macOS:** +```bash +bash scripts/unittests.sh +``` + +Both scripts run `pytest` with coverage reporting on the `robotframework_dashboard` package. + +## Test layout + +All unit tests live flat in `tests/` — no subdirectories. + +| File | What it covers | +|---|---| +| `tests/conftest.py` | Shared pytest fixtures (XML paths, `OutputProcessor`, `DatabaseProcessor`) | +| `tests/test_arguments.py` | `dotdict`, `_normalize_bool`, `_check_project_version_usage`, `_process_arguments` (all branches), `get_arguments` via mocked `sys.argv` | +| `tests/test_processors.py` | `OutputProcessor`: `get_run_start`, `get_output_data`, `calculate_keyword_averages`, `merge_run_and_suite_metadata`; legacy RF compat branches (`RunProcessor`, `SuiteProcessor`, `TestProcessor`, `KeywordProcessor`) | +| `tests/test_database.py` | `DatabaseProcessor`: table creation, schema migration, insert/get round-trip, all `remove_runs` strategies, `list_runs`, `vacuum_database`, `update_output_path`, static helpers | +| `tests/test_dashboard.py` | `DashboardGenerator`: `_compress_and_encode`, `_minify_text`, `generate_dashboard` (file creation, title, server mode, configs, subdirectory) | +| `tests/test_dependencies.py` | `DependencyProcessor`: JS/CSS block generation, CDN vs offline mode, admin-page variant, file gathering | +| `tests/test_robotdashboard.py` | `RobotDashboard`: `initialize_database`, `process_outputs`, `print_runs`, `remove_outputs`, `create_dashboard`, `get_runs`, `get_run_paths`, `update_output_path` | +| `tests/test_main.py` | `main()`: orchestration pipeline via mocked 
`ArgumentParser` and `RobotDashboard`; server branch | +| `tests/test_server.py` | `ApiServer`: all FastAPI endpoints via `TestClient` — auth, add/remove outputs, add/remove logs, file uploads (plain and gzip), catch-all resource route, autoupdate flag | + +## Test data + +Real `output.xml` files live in `testdata/outputs/`. These are the same 15 Robot Framework output files used by the acceptance tests — no synthetic mocks. Using real XMLs means `OutputProcessor` and `DatabaseProcessor` are exercised against genuine data, not fabricated inputs. + +Inline data fixtures (plain Python tuples/dicts) are used only for edge cases that real XMLs cannot cover, such as malformed inputs and single-entry keyword lists. + +## Testing the server + +`tests/test_server.py` uses `fastapi.testclient.TestClient` (backed by `httpx`) to exercise every endpoint in `ApiServer` without starting a real network process. The `RobotDashboard` dependency is replaced with a `MagicMock`, making every test fast and deterministic. `httpx` is a required dev dependency — install it with `pip install httpx`. + +Key patterns used: +- `_make_server()` helper creates an `ApiServer` with mock `RobotDashboard` attached. +- `monkeypatch.chdir(tmp_path)` is used whenever the server writes files to the current directory (e.g., `output_data`, file uploads). +- `client.request("DELETE", ...)` is used for the `DELETE` endpoints since `TestClient` has no `.delete()` method that accepts a JSON body. + +## Why no pytest-mock + +`pytest-mock` was considered and explicitly rejected for the pure-logic tests. The codebase has no need for it because: + +- Pure functions are tested directly with inline data. +- `DatabaseProcessor` is tested against an in-memory SQLite (`:memory:`) — no patching needed. +- `OutputProcessor` is tested with real XML files — no patching of `robot.api` needed. +- `server.py` endpoints use `TestClient` + `MagicMock` — the standard library `unittest.mock` is sufficient. 
+ +## What is deliberately not unit-tested + +| Module | Reason | +|---|---| +| `main.py` (fully wired) | Pure orchestration entry point; the two `test_main.py` tests cover the call graph using mocks, but the real subprocess path (file I/O) is covered by acceptance tests | +| `abstractdb.py` abstract methods | These are abstract — by definition untestable without a concrete subclass; the concrete `DatabaseProcessor` is fully tested | + +## CI integration + +Unit tests run as a separate `unit-tests` job in `.github/workflows/tests.yml` **before** the Robot acceptance tests. The `robot-tests` job declares `needs: unit-tests`, so acceptance tests are skipped entirely if unit tests fail. This keeps CI fast: a broken pure-Python function fails in seconds rather than after the full heavyweight Playwright container spins up. + +## Schema migration test + +`test_schema_migration_runs_table_from_10_to_14` in `test_database.py` creates a legacy 10-column SQLite database by hand and asserts that `DatabaseProcessor.__init__` automatically migrates all four tables to their current column counts (runs: 14, suites: 11, tests: 12, keywords: 12). This protects against regressions when future schema columns are added. 
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index c0939a7f..3f76c050 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -4,7 +4,35 @@ on: pull_request: jobs: + unit-tests: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install robotframework pytest pytest-cov fastapi uvicorn httpx + + - name: Run unit tests + run: | + bash scripts/unittests.sh + + - name: Upload coverage report + uses: actions/upload-artifact@v4 + with: + name: coverage-report + path: coverage.xml + robot-tests: + needs: unit-tests runs-on: ubuntu-latest container: image: mcr.microsoft.com/playwright:v1.56.0-jammy diff --git a/.gitignore b/.gitignore index f8fa56aa..127a8d8a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ results logs *.pyc +.coverage __pycache__ dist build diff --git a/atest/resources/keywords/general-keywords.resource b/atest/resources/keywords/general-keywords.resource index 172c0a23..5fb6e549 100644 --- a/atest/resources/keywords/general-keywords.resource +++ b/atest/resources/keywords/general-keywords.resource @@ -24,7 +24,7 @@ Generate Dashboard ${index} Get Dashboard Index Release Lock name=dashboard_index VAR ${DASHBOARD_INDEX} ${index} scope=test - ${output} Run command=robotdashboard -d robotresults_${DASHBOARD_INDEX}.db -f ${CURDIR}/../outputs:dev:prod:project_1 -n robotdashboard_${DASHBOARD_INDEX}.html + ${output} Run command=robotdashboard -d robotresults_${DASHBOARD_INDEX}.db -f ${CURDIR}/../../../testdata/outputs:dev:prod:project_1 -n robotdashboard_${DASHBOARD_INDEX}.html Log ${output} Remove Database And Dashboard diff --git a/atest/testsuites/00_cli.robot b/atest/testsuites/00_cli.robot index 354e410d..79f6ce97 100644 --- a/atest/testsuites/00_cli.robot +++ b/atest/testsuites/00_cli.robot @@ -7,7 +7,7 @@
Suite Teardown Run Teardown Only Once keyword=Remove Database And Dashboar *** Variables *** -${OUTPUTS_FOLDER} ${CURDIR}/../resources/outputs +${OUTPUTS_FOLDER} ${CURDIR}/../../testdata/outputs ${OS} ${None} # set on runtime diff --git a/pyproject.toml b/pyproject.toml index e69de29b..2fb79e05 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -0,0 +1,4 @@ +[tool.pytest.ini_options] +filterwarnings = [ + "ignore::ResourceWarning", +] diff --git a/requirements-dev.txt b/requirements-dev.txt index d0101df7..1357a224 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -3,4 +3,7 @@ setuptools build wheel twine -mysql-connector-python \ No newline at end of file +mysql-connector-python +pytest +pytest-cov +httpx \ No newline at end of file diff --git a/robotframework_dashboard/abstractdb.py b/robotframework_dashboard/abstractdb.py index e8b49ab0..c4e9905a 100644 --- a/robotframework_dashboard/abstractdb.py +++ b/robotframework_dashboard/abstractdb.py @@ -7,59 +7,59 @@ def __init_subclass__(cls, **kwargs): """Function to validate that the custom dabataseclass is named 'DatabaseProcessor' correctly""" super().__init_subclass__(**kwargs) if cls.__name__ != "DatabaseProcessor": - raise TypeError(f"The custom databaseclass classname must be 'DatabaseProcessor', not '{cls.__name__}'") + raise TypeError(f"The custom databaseclass classname must be 'DatabaseProcessor', not '{cls.__name__}'") # pragma: no cover @abstractmethod def __init__(self, database_path: Path) -> None: """Mandatory: This function should handle the creation of the tables if required The use of the database_path variable might not be required but you should still keep it as an argument! 
""" - pass + pass # pragma: no cover @abstractmethod def open_database(self) -> None: """Mandatory: This function should handle the connection to the database and set it for other functions to use""" - pass + pass # pragma: no cover @abstractmethod def close_database(self) -> None: """Mandatory: This function is called to close the connection to the database""" - pass + pass # pragma: no cover @abstractmethod def run_start_exists(self, run_start: str) -> bool: """Mandatory: This function is called to check if the output is already present in the database, this is done to save time on needless reprocessing. If you want a very simple implementation without complex logic you can simply "return False". This will work but will reprocess needlessly. """ - pass + pass # pragma: no cover @abstractmethod def insert_output_data( self, output_data: dict, tags: list, run_alias: str, path: Path, project_version: str, timezone: str = "" ) -> None: """Mandatory: This function inserts the data of an output file into the database""" - pass + pass # pragma: no cover @abstractmethod def get_data(self) -> dict: """Mandatory: This function gets all the data in the database""" - pass + pass # pragma: no cover @abstractmethod def list_runs(self) -> None: """Mandatory: This function gets all available runs and prints them to the console""" - pass + pass # pragma: no cover @abstractmethod def remove_runs(self, remove_runs: list) -> None: """Mandatory: This function removes all provided runs and all their corresponding data""" - pass + pass # pragma: no cover - def update_output_path(self, log_path: str) -> None: + def update_output_path(self, log_path: str) -> None: # pragma: no cover """Optional: Function to update the output_path using the log path that the server has used""" raise NotImplementedError("update_output_path is not implemented in the custom databaseclass, but is only required when using the server!") - def _get_run_paths(self) -> dict: + def _get_run_paths(self) -> dict: # 
pragma: no cover """Optional: Returns a dict mapping run_start -> path for all runs. Required by the server when automatically deleting log files after removing outputs. If not implemented, log files will not be automatically deleted on output removal.""" diff --git a/robotframework_dashboard/database.py b/robotframework_dashboard/database.py index 6b0aa9b1..3c126979 100644 --- a/robotframework_dashboard/database.py +++ b/robotframework_dashboard/database.py @@ -5,6 +5,11 @@ from time import time from datetime import datetime, timezone +# Explicit adapter for datetime -> ISO string, replacing the deprecated default +# behaviour removed in Python 3.12+. Compatible with Python 3.8+. +# See: https://docs.python.org/3/library/sqlite3.html#adapter-and-converter-recipes +sqlite3.register_adapter(datetime, lambda val: val.isoformat(sep=" ")) + class DatabaseProcessor(AbstractDatabaseProcessor): def __init__(self, database_path: Path): @@ -125,6 +130,7 @@ def get_keywords_length(): def close_database(self): """This function is called to close the connection to the database""" self.connection.close() + self.connection = None def insert_output_data( self, diff --git a/robotframework_dashboard/dependencies.py b/robotframework_dashboard/dependencies.py index 992677c8..f691686d 100644 --- a/robotframework_dashboard/dependencies.py +++ b/robotframework_dashboard/dependencies.py @@ -126,7 +126,7 @@ def _inline_js_modules(self, js_files): modules = {} for rel_path in js_files: abs_path = base / rel_path - if not abs_path.exists(): + if not abs_path.exists(): # pragma: no cover raise FileNotFoundError(f"JS module not found: {abs_path}") modules[str(abs_path)] = abs_path.read_text(encoding="utf-8") diff --git a/robotframework_dashboard/main.py b/robotframework_dashboard/main.py index df125c16..6d6b57b5 100644 --- a/robotframework_dashboard/main.py +++ b/robotframework_dashboard/main.py @@ -89,5 +89,5 @@ def main(): server.run() -if __name__ == "__main__": +if __name__ == "__main__": 
# pragma: no cover main() diff --git a/robotframework_dashboard/server.py b/robotframework_dashboard/server.py index 314b1c89..ff61ef68 100644 --- a/robotframework_dashboard/server.py +++ b/robotframework_dashboard/server.py @@ -302,7 +302,8 @@ def __init__( def _get_admin_page(self): admin_file = join(dirname(abspath(__file__)), "./templates", "admin.html") - admin_html = open(admin_file, "r").read() + with open(admin_file, "r", encoding="utf-8") as _f: + admin_html = _f.read() admin_html = admin_html.replace('"placeholder_version"', __version__) admin_html = admin_html.replace( "", @@ -335,8 +336,8 @@ def model_examples(model_cls: BaseModel): return openapi_examples def authenticate(credentials: HTTPBasicCredentials = Depends(self.security)): - if not self.server_user or not self.server_pass: - return "anonymous" + if not self.server_user or not self.server_pass: # pragma: no cover + return "anonymous" # pragma: no cover correct_username = compare_digest(credentials.username, self.server_user) correct_password = compare_digest(credentials.password, self.server_pass) if not (correct_username and correct_password): @@ -366,7 +367,8 @@ async def admin_page(username: str = Depends(authenticate)): ) async def dashboard_page(): """Serve robotdashboard HTML endpoint function""" - robot_dashboard_html = open("robot_dashboard.html", "r", encoding="utf-8").read() + with open("robot_dashboard.html", "r", encoding="utf-8") as _f: + robot_dashboard_html = _f.read() return robot_dashboard_html @self.app.post("/refresh-dashboard") @@ -800,6 +802,6 @@ def set_robotdashboard(self, robotdashboard: RobotDashboard): """Function to initialize the RobotDashboard class""" self.robotdashboard = robotdashboard - def run(self): + def run(self): # pragma: no cover """Function to start up the FastAPI server through uvicorn""" run(self.app, host=self.server_host, port=self.server_port) diff --git a/scripts/unittests.bat b/scripts/unittests.bat new file mode 100644 index 00000000..3a534452 
--- /dev/null +++ b/scripts/unittests.bat @@ -0,0 +1,3 @@ +set COVERAGE_FILE=results/.coverage +set PYTHONPATH=%~dp0.. +python -m pytest tests/ --cov=robotframework_dashboard --cov-report=term-missing --cov-report=html:results/coverage diff --git a/scripts/unittests.sh b/scripts/unittests.sh new file mode 100644 index 00000000..00cde806 --- /dev/null +++ b/scripts/unittests.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +COVERAGE_FILE=results/.coverage PYTHONPATH="$SCRIPT_DIR/.." python -m pytest tests/ --cov=robotframework_dashboard --cov-report=term-missing --cov-report=html:results/coverage diff --git a/atest/resources/outputs/log-20250313-002134.html b/testdata/outputs/log-20250313-002134.html similarity index 100% rename from atest/resources/outputs/log-20250313-002134.html rename to testdata/outputs/log-20250313-002134.html diff --git a/atest/resources/outputs/log-20250313-002151.html b/testdata/outputs/log-20250313-002151.html similarity index 100% rename from atest/resources/outputs/log-20250313-002151.html rename to testdata/outputs/log-20250313-002151.html diff --git a/atest/resources/outputs/log-20250313-002222.html b/testdata/outputs/log-20250313-002222.html similarity index 100% rename from atest/resources/outputs/log-20250313-002222.html rename to testdata/outputs/log-20250313-002222.html diff --git a/atest/resources/outputs/log-20250313-002257.html b/testdata/outputs/log-20250313-002257.html similarity index 100% rename from atest/resources/outputs/log-20250313-002257.html rename to testdata/outputs/log-20250313-002257.html diff --git a/atest/resources/outputs/log-20250313-002338.html b/testdata/outputs/log-20250313-002338.html similarity index 100% rename from atest/resources/outputs/log-20250313-002338.html rename to testdata/outputs/log-20250313-002338.html diff --git a/atest/resources/outputs/log-20250313-002400.html b/testdata/outputs/log-20250313-002400.html similarity index 100% 
rename from atest/resources/outputs/log-20250313-002400.html rename to testdata/outputs/log-20250313-002400.html diff --git a/atest/resources/outputs/log-20250313-002431.html b/testdata/outputs/log-20250313-002431.html similarity index 100% rename from atest/resources/outputs/log-20250313-002431.html rename to testdata/outputs/log-20250313-002431.html diff --git a/atest/resources/outputs/log-20250313-002457.html b/testdata/outputs/log-20250313-002457.html similarity index 100% rename from atest/resources/outputs/log-20250313-002457.html rename to testdata/outputs/log-20250313-002457.html diff --git a/atest/resources/outputs/log-20250313-002528.html b/testdata/outputs/log-20250313-002528.html similarity index 100% rename from atest/resources/outputs/log-20250313-002528.html rename to testdata/outputs/log-20250313-002528.html diff --git a/atest/resources/outputs/log-20250313-002549.html b/testdata/outputs/log-20250313-002549.html similarity index 100% rename from atest/resources/outputs/log-20250313-002549.html rename to testdata/outputs/log-20250313-002549.html diff --git a/atest/resources/outputs/log-20250313-002636.html b/testdata/outputs/log-20250313-002636.html similarity index 100% rename from atest/resources/outputs/log-20250313-002636.html rename to testdata/outputs/log-20250313-002636.html diff --git a/atest/resources/outputs/log-20250313-002703.html b/testdata/outputs/log-20250313-002703.html similarity index 100% rename from atest/resources/outputs/log-20250313-002703.html rename to testdata/outputs/log-20250313-002703.html diff --git a/atest/resources/outputs/log-20250313-002739.html b/testdata/outputs/log-20250313-002739.html similarity index 100% rename from atest/resources/outputs/log-20250313-002739.html rename to testdata/outputs/log-20250313-002739.html diff --git a/atest/resources/outputs/log-20250313-002915.html b/testdata/outputs/log-20250313-002915.html similarity index 100% rename from atest/resources/outputs/log-20250313-002915.html rename to 
testdata/outputs/log-20250313-002915.html diff --git a/atest/resources/outputs/log-20250313-003006.html b/testdata/outputs/log-20250313-003006.html similarity index 100% rename from atest/resources/outputs/log-20250313-003006.html rename to testdata/outputs/log-20250313-003006.html diff --git a/atest/resources/outputs/output-20250313-002134.xml b/testdata/outputs/output-20250313-002134.xml similarity index 100% rename from atest/resources/outputs/output-20250313-002134.xml rename to testdata/outputs/output-20250313-002134.xml diff --git a/atest/resources/outputs/output-20250313-002151.xml b/testdata/outputs/output-20250313-002151.xml similarity index 100% rename from atest/resources/outputs/output-20250313-002151.xml rename to testdata/outputs/output-20250313-002151.xml diff --git a/atest/resources/outputs/output-20250313-002222.xml b/testdata/outputs/output-20250313-002222.xml similarity index 100% rename from atest/resources/outputs/output-20250313-002222.xml rename to testdata/outputs/output-20250313-002222.xml diff --git a/atest/resources/outputs/output-20250313-002257.xml b/testdata/outputs/output-20250313-002257.xml similarity index 100% rename from atest/resources/outputs/output-20250313-002257.xml rename to testdata/outputs/output-20250313-002257.xml diff --git a/atest/resources/outputs/output-20250313-002338.xml b/testdata/outputs/output-20250313-002338.xml similarity index 100% rename from atest/resources/outputs/output-20250313-002338.xml rename to testdata/outputs/output-20250313-002338.xml diff --git a/atest/resources/outputs/output-20250313-002400.xml b/testdata/outputs/output-20250313-002400.xml similarity index 100% rename from atest/resources/outputs/output-20250313-002400.xml rename to testdata/outputs/output-20250313-002400.xml diff --git a/atest/resources/outputs/output-20250313-002431.xml b/testdata/outputs/output-20250313-002431.xml similarity index 100% rename from atest/resources/outputs/output-20250313-002431.xml rename to 
testdata/outputs/output-20250313-002431.xml diff --git a/atest/resources/outputs/output-20250313-002457.xml b/testdata/outputs/output-20250313-002457.xml similarity index 100% rename from atest/resources/outputs/output-20250313-002457.xml rename to testdata/outputs/output-20250313-002457.xml diff --git a/atest/resources/outputs/output-20250313-002528.xml b/testdata/outputs/output-20250313-002528.xml similarity index 100% rename from atest/resources/outputs/output-20250313-002528.xml rename to testdata/outputs/output-20250313-002528.xml diff --git a/atest/resources/outputs/output-20250313-002549.xml b/testdata/outputs/output-20250313-002549.xml similarity index 100% rename from atest/resources/outputs/output-20250313-002549.xml rename to testdata/outputs/output-20250313-002549.xml diff --git a/atest/resources/outputs/output-20250313-002636.xml b/testdata/outputs/output-20250313-002636.xml similarity index 100% rename from atest/resources/outputs/output-20250313-002636.xml rename to testdata/outputs/output-20250313-002636.xml diff --git a/atest/resources/outputs/output-20250313-002703.xml b/testdata/outputs/output-20250313-002703.xml similarity index 100% rename from atest/resources/outputs/output-20250313-002703.xml rename to testdata/outputs/output-20250313-002703.xml diff --git a/atest/resources/outputs/output-20250313-002739.xml b/testdata/outputs/output-20250313-002739.xml similarity index 100% rename from atest/resources/outputs/output-20250313-002739.xml rename to testdata/outputs/output-20250313-002739.xml diff --git a/atest/resources/outputs/output-20250313-002915.xml b/testdata/outputs/output-20250313-002915.xml similarity index 100% rename from atest/resources/outputs/output-20250313-002915.xml rename to testdata/outputs/output-20250313-002915.xml diff --git a/atest/resources/outputs/output-20250313-003006.xml b/testdata/outputs/output-20250313-003006.xml similarity index 100% rename from atest/resources/outputs/output-20250313-003006.xml rename to 
testdata/outputs/output-20250313-003006.xml diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..c9a6293c --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,60 @@ +from pathlib import Path +import warnings +import pytest + +# Suppress sqlite3 ResourceWarning. These surface when coverage tracing changes +# GC timing, causing closed Connection objects to be collected during XML +# parsing. The connections ARE properly closed; this is a Python 3.12+ +# GC-strictness artefact that only appears with pytest-cov active. +warnings.filterwarnings("ignore", category=ResourceWarning) + +OUTPUTS_DIR = Path(__file__).parent.parent / "testdata" / "outputs" +SAMPLE_XML = OUTPUTS_DIR / "output-20250313-002134.xml" + + +@pytest.fixture +def xml_output(): + return SAMPLE_XML + + +@pytest.fixture +def all_xml_outputs(): + return sorted(OUTPUTS_DIR.glob("output-*.xml")) + + +@pytest.fixture +def processed_output(): + from robotframework_dashboard.processors import OutputProcessor + + processor = OutputProcessor(SAMPLE_XML) + processor.get_run_start() + return processor + + +@pytest.fixture +def db(tmp_path): + from robotframework_dashboard.database import DatabaseProcessor + + return DatabaseProcessor(tmp_path / "test.db") + + +@pytest.fixture +def populated_db(tmp_path): + from robotframework_dashboard.database import DatabaseProcessor + from robotframework_dashboard.processors import OutputProcessor + + db = DatabaseProcessor(tmp_path / "test.db") + processor = OutputProcessor(SAMPLE_XML) + processor.get_run_start() + data = processor.get_output_data() + db.open_database() + db.insert_output_data( + data, + tags=["dev", "prod"], + run_alias="my_alias", + path=SAMPLE_XML, + project_version=None, + timezone="+01:00", + ) + db.close_database() + return db diff --git a/tests/test_arguments.py b/tests/test_arguments.py new file mode 100644 index 00000000..fee8f10f --- /dev/null +++ b/tests/test_arguments.py @@ -0,0 +1,380 @@ +import argparse +import 
pytest +from robotframework_dashboard.arguments import dotdict, ArgumentParser + + +# --- dotdict --- + +def test_dotdict_attribute_access(): + d = dotdict({"key": "value"}) + assert d.key == "value" + + +def test_dotdict_missing_key_returns_none(): + d = dotdict() + assert d.missing_key is None + + +def test_dotdict_set_via_attribute(): + d = dotdict() + d.key = "value" + assert d["key"] == "value" + + +def test_dotdict_delete_via_attribute(): + d = dotdict({"key": "value"}) + del d.key + assert "key" not in d + + +def test_dotdict_nested(): + d = dotdict({"outer": dotdict({"inner": 42})}) + assert d.outer.inner == 42 + + +# --- _normalize_bool --- + +def test_normalize_bool_lowercase_true(): + assert ArgumentParser()._normalize_bool("true", "test") is True + + +def test_normalize_bool_uppercase_true(): + assert ArgumentParser()._normalize_bool("True", "test") is True + + +def test_normalize_bool_lowercase_false(): + assert ArgumentParser()._normalize_bool("false", "test") is False + + +def test_normalize_bool_uppercase_false(): + assert ArgumentParser()._normalize_bool("False", "test") is False + + +def test_normalize_bool_python_true(): + assert ArgumentParser()._normalize_bool(True, "test") is True + + +def test_normalize_bool_python_false(): + assert ArgumentParser()._normalize_bool(False, "test") is False + + +def test_normalize_bool_invalid_exits(): + with pytest.raises(SystemExit): + ArgumentParser()._normalize_bool("maybe", "test") + + +def test_normalize_bool_empty_string_exits(): + with pytest.raises(SystemExit): + ArgumentParser()._normalize_bool("", "test") + + +def test_normalize_bool_numeric_exits(): + with pytest.raises(SystemExit): + ArgumentParser()._normalize_bool("1", "test") + + +# --- _check_project_version_usage --- + +def test_check_project_version_no_tags_ok(): + args = argparse.Namespace(project_version=None) + # must not raise or exit + ArgumentParser()._check_project_version_usage(["dev", "prod"], args) + + +def 
test_check_project_version_one_version_tag_ok(): + args = argparse.Namespace(project_version=None) + ArgumentParser()._check_project_version_usage(["version_1.0", "dev"], args) + + +def test_check_project_version_two_version_tags_exits(): + args = argparse.Namespace(project_version=None) + with pytest.raises(SystemExit) as exc_info: + ArgumentParser()._check_project_version_usage(["version_1.0", "version_2.0"], args) + assert exc_info.value.code == 1 + + +def test_check_project_version_mixed_tag_and_arg_exits(): + args = argparse.Namespace(project_version="1.0") + with pytest.raises(SystemExit) as exc_info: + ArgumentParser()._check_project_version_usage(["version_1.0", "dev"], args) + assert exc_info.value.code == 2 + + +def test_check_project_version_empty_tags_ok(): + args = argparse.Namespace(project_version=None) + ArgumentParser()._check_project_version_usage([], args) + + +def test_check_project_version_project_version_without_tag_ok(): + args = argparse.Namespace(project_version="2.0") + ArgumentParser()._check_project_version_usage(["dev", "prod"], args) + + +# --- _process_arguments --- + +def _make_namespace(**kwargs): + """Create an argparse.Namespace with sane defaults for _process_arguments tests.""" + defaults = { + "version": False, + "outputpath": None, + "outputfolderpath": None, + "project_version": None, + "timezone": None, + "removeruns": None, + "generatedashboard": True, + "listruns": True, + "offlinedependencies": False, + "uselogs": False, + "forcejsonconfig": False, + "novacuum": False, + "noautoupdate": False, + "messageconfig": None, + "jsonconfig": None, + "namedashboard": "", + "databaseclass": None, + "server": None, + "quantity": None, + "databasepath": "robot_results.db", + "dashboardtitle": "", + } + defaults.update(kwargs) + return argparse.Namespace(**defaults) + + +def test_process_arguments_version_flag_exits(): + args = _make_namespace(version=True) + with pytest.raises(SystemExit): + ArgumentParser()._process_arguments(args) 
+ + +def test_process_arguments_defaults_returns_dotdict(): + args = _make_namespace() + result = ArgumentParser()._process_arguments(args) + assert isinstance(result, dotdict) + assert result.database_path == "robot_results.db" + assert result.outputs is None + assert result.start_server is False + assert result.generate_dashboard is True + assert result.list_runs is True + + +def test_process_arguments_with_outputpath(): + args = _make_namespace(outputpath=[["path/to/output.xml"]]) + result = ArgumentParser()._process_arguments(args) + assert result.outputs is not None + assert len(result.outputs) == 1 + assert result.outputs[0][0] == "path/to/output.xml" + assert result.outputs[0][1] == [] + + +def test_process_arguments_with_outputpath_and_tags(): + args = _make_namespace(outputpath=[["path/to/output.xml:dev:prod"]]) + result = ArgumentParser()._process_arguments(args) + assert result.outputs[0][1] == ["dev", "prod"] + + +def test_process_arguments_with_outputfolderpath(): + args = _make_namespace(outputfolderpath=[["results/"]]) + result = ArgumentParser()._process_arguments(args) + assert result.output_folder_paths is not None + assert result.output_folder_paths[0][0] == "results/" + assert result.output_folder_paths[0][1] == [] + + +def test_process_arguments_with_outputfolderpath_and_tags(): + args = _make_namespace(outputfolderpath=[["results/:prod:nightly"]]) + result = ArgumentParser()._process_arguments(args) + assert result.output_folder_paths[0][1] == ["prod", "nightly"] + + +def test_process_arguments_with_removeruns(): + args = _make_namespace(removeruns=[["index=0,index=1"]]) + result = ArgumentParser()._process_arguments(args) + assert result.remove_runs == ["index=0", "index=1"] + + +def test_process_arguments_with_messageconfig(tmp_path): + msg_file = tmp_path / "messages.txt" + msg_file.write_text("Template: ${name}\nLine 2\n") + args = _make_namespace(messageconfig=str(msg_file)) + result = ArgumentParser()._process_arguments(args) + assert 
len(result.message_config) == 2 + assert "Template: ${name}" in result.message_config[0] + + +def test_process_arguments_with_jsonconfig(tmp_path): + config_file = tmp_path / "config.json" + config_file.write_text('{"color": "red"}') + args = _make_namespace(jsonconfig=str(config_file)) + result = ArgumentParser()._process_arguments(args) + assert '{"color": "red"}' in result.json_config + + +def test_process_arguments_forcejsonconfig_without_jsonconfig_exits(): + args = _make_namespace(forcejsonconfig=True, jsonconfig=None) + with pytest.raises(SystemExit): + ArgumentParser()._process_arguments(args) + + +def test_process_arguments_dashboard_name_empty_generates_timestamped(): + args = _make_namespace(namedashboard="") + result = ArgumentParser()._process_arguments(args) + assert result.dashboard_name.startswith("robot_dashboard_") + assert result.dashboard_name.endswith(".html") + + +def test_process_arguments_dashboard_name_no_extension(): + args = _make_namespace(namedashboard="my_dashboard") + result = ArgumentParser()._process_arguments(args) + assert result.dashboard_name == "my_dashboard.html" + + +def test_process_arguments_dashboard_name_with_html_extension(): + args = _make_namespace(namedashboard="my_dashboard.html") + result = ArgumentParser()._process_arguments(args) + assert result.dashboard_name == "my_dashboard.html" + + +def test_process_arguments_server_none(): + args = _make_namespace(server=None) + result = ArgumentParser()._process_arguments(args) + assert result.start_server is False + + +def test_process_arguments_server_default(): + args = _make_namespace(server="default") + result = ArgumentParser()._process_arguments(args) + assert result.start_server is True + assert result.server_host == "127.0.0.1" + assert result.server_port == 8543 + assert result.server_user == "" + assert result.server_pass == "" + + +def test_process_arguments_server_default_with_credentials(): + args = _make_namespace(server="default:admin:secret") + result = 
ArgumentParser()._process_arguments(args) + assert result.start_server is True + assert result.server_user == "admin" + assert result.server_pass == "secret" + + +def test_process_arguments_server_custom_host_port(): + args = _make_namespace(server="0.0.0.0:8080") + result = ArgumentParser()._process_arguments(args) + assert result.start_server is True + assert result.server_host == "0.0.0.0" + assert result.server_port == 8080 + + +def test_process_arguments_server_custom_all(): + args = _make_namespace(server="0.0.0.0:8080:admin:secret") + result = ArgumentParser()._process_arguments(args) + assert result.start_server is True + assert result.server_host == "0.0.0.0" + assert result.server_port == 8080 + assert result.server_user == "admin" + assert result.server_pass == "secret" + + +def test_process_arguments_quantity_default(): + args = _make_namespace(quantity=None) + result = ArgumentParser()._process_arguments(args) + assert result.quantity == 20 + + +def test_process_arguments_quantity_custom(): + args = _make_namespace(quantity="50") + result = ArgumentParser()._process_arguments(args) + assert int(result.quantity) == 50 + + +def test_process_arguments_timezone_provided(): + args = _make_namespace(timezone="+02:00") + result = ArgumentParser()._process_arguments(args) + assert result.timezone == "+02:00" + + +def test_process_arguments_timezone_auto_detected(): + import re + args = _make_namespace(timezone=None) + result = ArgumentParser()._process_arguments(args) + assert re.match(r"^[+-]\d{2}:\d{2}$", result.timezone) + + +def test_process_arguments_databaseclass_none(): + args = _make_namespace(databaseclass=None) + result = ArgumentParser()._process_arguments(args) + assert result.database_class is None + + +def test_process_arguments_databaseclass_not_exists_raises(tmp_path): + nonexistent = tmp_path / "nonexistent" / "db.py" + args = _make_namespace(databaseclass=str(nonexistent)) + with pytest.raises(Exception, match="ERROR"): + 
ArgumentParser()._process_arguments(args) + + +def test_process_arguments_databaseclass_valid(tmp_path): + db_class = tmp_path / "mydb.py" + db_class.write_text("# placeholder") + args = _make_namespace(databaseclass=str(db_class)) + result = ArgumentParser()._process_arguments(args) + assert result.database_class is not None + + +def test_process_arguments_returns_all_keys(): + args = _make_namespace() + result = ArgumentParser()._process_arguments(args) + expected_keys = { + "outputs", "output_folder_paths", "database_path", "generate_dashboard", + "dashboard_name", "generation_datetime", "list_runs", "remove_runs", + "dashboard_title", "database_class", "start_server", "server_host", + "server_port", "server_user", "server_pass", "json_config", + "message_config", "quantity", "use_logs", "offline_dependencies", + "force_json_config", "project_version", "no_vacuum", "timezone", + "no_autoupdate", + } + for key in expected_keys: + assert key in result, f"Missing key: {key}" + + +# --- get_arguments (full pipeline via mocked sys.argv) --- + +def test_get_arguments_minimal_argv(): + """Exercises _parse_arguments() to cover all parser.add_argument() setup.""" + from unittest.mock import patch + with patch("sys.argv", ["robotdashboard"]): + result = ArgumentParser().get_arguments() + assert result.database_path == "robot_results.db" + assert result.outputs is None + assert result.start_server is False + + +def test_get_arguments_with_output_flag(): + from unittest.mock import patch + with patch("sys.argv", ["robotdashboard", "-o", "results/output.xml"]): + result = ArgumentParser().get_arguments() + assert result.outputs is not None + assert len(result.outputs) == 1 + + +def test_get_arguments_exception_calls_exit(): + """Ensure get_arguments handles exceptions from _process_arguments gracefully.""" + from unittest.mock import patch + # --forcejsonconfig without --jsonconfig triggers exit via _process_arguments + with patch("sys.argv", ["robotdashboard", 
"--forcejsonconfig", "true"]): + with pytest.raises(SystemExit): + ArgumentParser().get_arguments() + + +def test_get_arguments_parse_exception_prints_error(capsys): + """When _parse_arguments raises Exception the except block prints error and exits.""" + from unittest.mock import patch + parser = ArgumentParser() + with patch.object(parser, "_parse_arguments", side_effect=Exception("bad parse")): + with pytest.raises(SystemExit): + parser.get_arguments() + captured = capsys.readouterr() + assert "ERROR" in captured.out + assert "bad parse" in captured.out diff --git a/tests/test_dashboard.py b/tests/test_dashboard.py new file mode 100644 index 00000000..297eeb8f --- /dev/null +++ b/tests/test_dashboard.py @@ -0,0 +1,192 @@ +import json +import zlib +import base64 +import pytest +from robotframework_dashboard.dashboard import DashboardGenerator + + +# --- _compress_and_encode --- + +def test_compress_and_encode_returns_string(): + result = DashboardGenerator()._compress_and_encode({"key": "value"}) + assert isinstance(result, str) + + +def test_compress_and_encode_round_trip_list(): + obj = [{"run_start": "2025-01-01", "name": "Test", "total": 10}] + encoded = DashboardGenerator()._compress_and_encode(obj) + decoded = json.loads(zlib.decompress(base64.b64decode(encoded))) + assert decoded == obj + + +def test_compress_and_encode_round_trip_dict(): + obj = {"runs": [1, 2, 3], "tests": [{"name": "a"}]} + encoded = DashboardGenerator()._compress_and_encode(obj) + decoded = json.loads(zlib.decompress(base64.b64decode(encoded))) + assert decoded == obj + + +def test_compress_and_encode_empty_list(): + encoded = DashboardGenerator()._compress_and_encode([]) + decoded = json.loads(zlib.decompress(base64.b64decode(encoded))) + assert decoded == [] + + +def test_compress_and_encode_empty_dict(): + encoded = DashboardGenerator()._compress_and_encode({}) + decoded = json.loads(zlib.decompress(base64.b64decode(encoded))) + assert decoded == {} + + +def 
test_compress_and_encode_is_deterministic(): + obj = {"key": "value", "number": 42} + gen = DashboardGenerator() + assert gen._compress_and_encode(obj) == gen._compress_and_encode(obj) + + +def test_compress_and_encode_unicode(): + obj = {"name": "Ünïcödé tëst"} + encoded = DashboardGenerator()._compress_and_encode(obj) + decoded = json.loads(zlib.decompress(base64.b64decode(encoded))) + assert decoded == obj + + +# --- _minify_text --- + +def test_minify_text_removes_blank_lines(): + text = "line1\n\nline2\n\nline3\n" + result = DashboardGenerator()._minify_text(text) + assert result == "line1\nline2\nline3" + + +def test_minify_text_removes_whitespace_only_lines(): + text = "line1\n \nline2" + result = DashboardGenerator()._minify_text(text) + assert result == "line1\nline2" + + +def test_minify_text_strips_indentation(): + text = " indented\n line" + result = DashboardGenerator()._minify_text(text) + assert result == "indented\nline" + + +def test_minify_text_empty_input(): + result = DashboardGenerator()._minify_text("") + assert result == "" + + +def test_minify_text_all_blank_lines(): + result = DashboardGenerator()._minify_text("\n\n\n") + assert result == "" + + +def test_minify_text_single_line_unchanged(): + result = DashboardGenerator()._minify_text("hello") + assert result == "hello" + + +def test_minify_text_no_trailing_newline(): + result = DashboardGenerator()._minify_text("a\nb\n") + assert not result.endswith("\n") + + +# --- generate_dashboard --- + +from datetime import datetime +from pathlib import Path + +_EMPTY_DATA = {"runs": [], "suites": [], "tests": [], "keywords": []} + + +def _call_generate(tmp_path, **kwargs): + """Helper that calls generate_dashboard with sensible defaults.""" + output = tmp_path / "dashboard.html" + defaults = dict( + name_dashboard=str(output), + data=_EMPTY_DATA, + generation_datetime=datetime(2025, 1, 1, 12, 0, 0), + dashboard_title="", + server=False, + json_config=None, + message_config=[], + quantity=20, + 
use_logs=False, + offline=False, + force_json_config=False, + ) + defaults.update(kwargs) + DashboardGenerator().generate_dashboard(**defaults) + return output + + +def test_generate_dashboard_creates_file(tmp_path): + output = _call_generate(tmp_path) + assert output.exists() + assert output.stat().st_size > 0 + + +def test_generate_dashboard_custom_title(tmp_path): + output = _call_generate(tmp_path, dashboard_title="My Custom Dashboard") + content = output.read_text(encoding="utf-8") + assert "My Custom Dashboard" in content + + +def test_generate_dashboard_default_title_uses_datetime(tmp_path): + output = _call_generate(tmp_path, dashboard_title="") + content = output.read_text(encoding="utf-8") + assert "2025-01-01" in content + + +def test_generate_dashboard_server_mode(tmp_path): + output = _call_generate(tmp_path, server=True) + content = output.read_text(encoding="utf-8") + assert "true" in content + + +def test_generate_dashboard_no_server_mode(tmp_path): + output = _call_generate(tmp_path, server=False) + content = output.read_text(encoding="utf-8") + assert "false" in content + + +def test_generate_dashboard_with_message_config(tmp_path): + output = _call_generate(tmp_path, message_config=["Template: ${name}", "Alert: ${value}"]) + content = output.read_text(encoding="utf-8") + assert "Template" in content + + +def test_generate_dashboard_with_json_config(tmp_path): + output = _call_generate(tmp_path, json_config='{"theme": "dark"}') + content = output.read_text(encoding="utf-8") + assert "dark" in content + + +def test_generate_dashboard_use_logs_true(tmp_path): + output = _call_generate(tmp_path, use_logs=True) + content = output.read_text(encoding="utf-8") + assert "true" in content + + +def test_generate_dashboard_empty_runs_prints_warning(tmp_path, capsys): + _call_generate(tmp_path, data=_EMPTY_DATA) + captured = capsys.readouterr() + assert "WARNING" in captured.out + + +def test_generate_dashboard_subdirectory_created(tmp_path): + subdir_output 
= tmp_path / "sub" / "deeper" / "dashboard.html" + DashboardGenerator().generate_dashboard( + name_dashboard=str(subdir_output), + data=_EMPTY_DATA, + generation_datetime=datetime.now(), + dashboard_title="", + server=False, + json_config=None, + message_config=[], + quantity=20, + use_logs=False, + offline=False, + force_json_config=False, + ) + assert subdir_output.exists() diff --git a/tests/test_database.py b/tests/test_database.py new file mode 100644 index 00000000..eea24eda --- /dev/null +++ b/tests/test_database.py @@ -0,0 +1,527 @@ +import sqlite3 +import re +from pathlib import Path +import pytest +from robotframework_dashboard.database import DatabaseProcessor +from robotframework_dashboard.processors import OutputProcessor + +OUTPUTS_DIR = Path(__file__).parent.parent / "testdata" / "outputs" +SAMPLE_XML = OUTPUTS_DIR / "output-20250313-002134.xml" +SAMPLE_XML_2 = OUTPUTS_DIR / "output-20250313-002151.xml" + + +# --- open / close --- + +def test_open_database_creates_connection(db): + db.open_database() + assert db.connection is not None + db.close_database() + + +def test_close_database_closes_connection(db): + db.open_database() + db.close_database() + with pytest.raises(Exception): + db.connection.cursor() + + +# --- _create_tables --- + +def test_create_tables_results_in_four_tables(db): + db.open_database() + tables = { + row[0] + for row in db.connection.cursor() + .execute("SELECT name FROM sqlite_master WHERE type='table'") + .fetchall() + } + db.close_database() + assert {"runs", "suites", "tests", "keywords"}.issubset(tables) + + +def test_runs_table_column_count(db): + db.open_database() + cols = db.connection.cursor().execute("PRAGMA table_info(runs)").fetchall() + db.close_database() + assert len(cols) == 14 + + +def test_suites_table_column_count(db): + db.open_database() + cols = db.connection.cursor().execute("PRAGMA table_info(suites)").fetchall() + db.close_database() + assert len(cols) == 11 + + +def test_tests_table_column_count(db): 
+ db.open_database() + cols = db.connection.cursor().execute("PRAGMA table_info(tests)").fetchall() + db.close_database() + assert len(cols) == 12 + + +def test_keywords_table_column_count(db): + db.open_database() + cols = db.connection.cursor().execute("PRAGMA table_info(keywords)").fetchall() + db.close_database() + assert len(cols) == 12 + + +# --- run_start_exists --- + +def test_run_start_not_in_empty_db(db): + db.open_database() + assert db.run_start_exists("2025-03-13 00:21:34.707148") is False + db.close_database() + + +def test_run_start_exists_after_insert(populated_db): + populated_db.open_database() + # The fixture stores with +01:00; startswith check in run_start_exists handles this + result = populated_db.run_start_exists("2025-03-13 00:21:34.707148") + populated_db.close_database() + assert result is True + + +def test_run_start_with_full_tz_also_found(populated_db): + populated_db.open_database() + result = populated_db.run_start_exists("2025-03-13 00:21:34.707148+01:00") + populated_db.close_database() + assert result is True + + +# --- insert_output_data / get_data --- + +def test_insert_and_get_data_round_trip(populated_db): + populated_db.open_database() + data = populated_db.get_data() + populated_db.close_database() + assert len(data["runs"]) == 1 + assert len(data["suites"]) > 0 + assert len(data["tests"]) > 0 + assert len(data["keywords"]) > 0 + + +def test_tags_stored_correctly(populated_db): + populated_db.open_database() + data = populated_db.get_data() + populated_db.close_database() + assert "dev" in data["runs"][0]["tags"] + assert "prod" in data["runs"][0]["tags"] + + +def test_alias_stored_correctly(populated_db): + populated_db.open_database() + data = populated_db.get_data() + populated_db.close_database() + assert data["runs"][0]["run_alias"] == "my_alias" + + +def test_timezone_appended_to_run_start(populated_db): + populated_db.open_database() + data = populated_db.get_data() + populated_db.close_database() + assert "+01:00" in 
data["runs"][0]["run_start"] + + +def test_project_version_stored(tmp_path): + db = DatabaseProcessor(tmp_path / "pv.db") + processor = OutputProcessor(SAMPLE_XML) + processor.get_run_start() + data = processor.get_output_data() + db.open_database() + db.insert_output_data(data, [], "alias", SAMPLE_XML, "2.5.0") + result = db.get_data() + db.close_database() + assert result["runs"][0]["project_version"] == "2.5.0" + + +# --- remove_runs --- + +def _insert_second_run(populated_db): + """Helper: insert a second run from a different XML into populated_db.""" + processor = OutputProcessor(SAMPLE_XML_2) + processor.get_run_start() + data = processor.get_output_data() + populated_db.insert_output_data(data, [], "alias2", SAMPLE_XML_2, None) + + +def test_remove_by_index(populated_db): + populated_db.open_database() + _insert_second_run(populated_db) + assert len(populated_db.get_data()["runs"]) == 2 + populated_db.remove_runs(["index=0"]) + assert len(populated_db.get_data()["runs"]) == 1 + populated_db.close_database() + + +def test_remove_by_index_range(populated_db): + populated_db.open_database() + _insert_second_run(populated_db) + assert len(populated_db.get_data()["runs"]) == 2 + populated_db.remove_runs(["index=0:1"]) + assert len(populated_db.get_data()["runs"]) == 0 + populated_db.close_database() + + +def test_remove_by_alias(populated_db): + populated_db.open_database() + populated_db.remove_runs(["alias=my_alias"]) + assert len(populated_db.get_data()["runs"]) == 0 + populated_db.close_database() + + +def test_remove_by_tag(populated_db): + populated_db.open_database() + populated_db.remove_runs(["tag=dev"]) + assert len(populated_db.get_data()["runs"]) == 0 + populated_db.close_database() + + +def test_remove_by_tag_no_match_is_noop(populated_db): + populated_db.open_database() + populated_db.remove_runs(["tag=nonexistent"]) + assert len(populated_db.get_data()["runs"]) == 1 + populated_db.close_database() + + +def 
test_remove_by_limit_keeps_most_recent(populated_db): + populated_db.open_database() + _insert_second_run(populated_db) + assert len(populated_db.get_data()["runs"]) == 2 + populated_db.remove_runs(["limit=1"]) + assert len(populated_db.get_data()["runs"]) == 1 + populated_db.close_database() + + +def test_remove_by_limit_higher_than_count_is_noop(populated_db): + populated_db.open_database() + populated_db.remove_runs(["limit=100"]) + assert len(populated_db.get_data()["runs"]) == 1 + + +# --- list_runs --- + +def test_list_runs_empty_prints_warning(db, capsys): + db.open_database() + db.list_runs() + db.close_database() + captured = capsys.readouterr() + assert "WARNING" in captured.out + + +def test_list_runs_populated_prints_run_info(populated_db, capsys): + populated_db.open_database() + populated_db.list_runs() + populated_db.close_database() + captured = capsys.readouterr() + assert "Run 0" in captured.out + + +# --- vacuum_database --- + +def test_vacuum_database_returns_console(populated_db): + populated_db.open_database() + console = populated_db.vacuum_database() + populated_db.close_database() + assert "Vacuumed" in console + + +# --- update_output_path --- + +def test_update_output_path_found(populated_db): + populated_db.open_database() + # SAMPLE_XML is stored as path; its corresponding log is log-20250313-002134.html + log_path = str(OUTPUTS_DIR / "log-20250313-002134.html") + console = populated_db.update_output_path(log_path) + populated_db.close_database() + assert "Executed query" in console + + +def test_update_output_path_not_found(populated_db): + populated_db.open_database() + console = populated_db.update_output_path("path/to/nonexistent-log.html") + populated_db.close_database() + assert "ERROR" in console + + +# --- remove_runs by run_start --- + +def test_remove_runs_by_run_start(populated_db): + populated_db.open_database() + data = populated_db.get_data() + run_start = data["runs"][0]["run_start"] + 
populated_db.remove_runs([f"run_start={run_start}"]) + assert len(populated_db.get_data()["runs"]) == 0 + populated_db.close_database() + + +def test_remove_runs_by_run_start_not_found_logs_error(populated_db): + populated_db.open_database() + console = populated_db.remove_runs(["run_start=2000-01-01 00:00:00.000000+00:00"]) + populated_db.close_database() + assert "ERROR" in console + + +def test_remove_runs_invalid_format_logs_error(populated_db): + populated_db.open_database() + console = populated_db.remove_runs(["invalid_format=something"]) + populated_db.close_database() + assert "ERROR" in console + + +def test_remove_runs_semicolon_separated_indexes(populated_db): + populated_db.open_database() + _insert_second_run(populated_db) + assert len(populated_db.get_data()["runs"]) == 2 + populated_db.remove_runs(["index=0;1"]) + assert len(populated_db.get_data()["runs"]) == 0 + populated_db.close_database() + + +# --- get_data with duplicate aliases --- + +def test_get_data_duplicate_aliases(tmp_path): + """Two runs with the same alias → second gets a counter suffix.""" + db = DatabaseProcessor(tmp_path / "dup.db") + processor1 = OutputProcessor(SAMPLE_XML) + processor1.get_run_start() + data1 = processor1.get_output_data() + + processor2 = OutputProcessor(SAMPLE_XML_2) + processor2.get_run_start() + data2 = processor2.get_output_data() + + db.open_database() + db.insert_output_data(data1, [], "same_alias", SAMPLE_XML, None) + db.insert_output_data(data2, [], "same_alias", SAMPLE_XML_2, None) + result = db.get_data() + db.close_database() + + aliases = [run["run_alias"] for run in result["runs"]] + assert "same_alias" in aliases + # The second entry should have a counter appended + assert any(a != "same_alias" and "same_alias" in a for a in aliases) + + +# --- get_data without timezone stored (adds local tz) --- + +def test_get_data_run_without_timezone_gets_tz_appended(tmp_path): + db = DatabaseProcessor(tmp_path / "notz.db") + processor = 
OutputProcessor(SAMPLE_XML) + processor.get_run_start() + data = processor.get_output_data() + db.open_database() + # Insert without timezone + db.insert_output_data(data, [], "alias", SAMPLE_XML, None, timezone="") + result = db.get_data() + db.close_database() + # Local timezone should have been appended by get_data + assert re.match(r".*[+-]\d{2}:\d{2}$", result["runs"][0]["run_start"]) + + +def test_remove_by_run_start(populated_db): + populated_db.open_database() + data = populated_db.get_data() + run_start = data["runs"][0]["run_start"] + populated_db.remove_runs([f"run_start={run_start}"]) + assert len(populated_db.get_data()["runs"]) == 0 + populated_db.close_database() + + +# --- _has_timezone_offset (static) --- + +@pytest.mark.parametrize("run_start,expected", [ + ("2025-03-13 00:21:34.707148+01:00", True), + ("2025-03-13 00:21:34.707148-05:30", True), + ("2025-03-13 00:21:34.707148+00:00", True), + ("2025-03-13 00:21:34.707148", False), + ("short", False), + ("", False), +]) +def test_has_timezone_offset(run_start, expected): + assert DatabaseProcessor._has_timezone_offset(run_start) is expected + + +# --- _get_local_timezone_offset (static) --- + +def test_get_local_timezone_offset_format(): + result = DatabaseProcessor._get_local_timezone_offset() + assert re.match(r"^[+-]\d{2}:\d{2}$", result), f"Unexpected format: {result}" + + +# --- _dict_from_row (static) --- + +def test_dict_from_row(): + conn = sqlite3.connect(":memory:") + conn.row_factory = sqlite3.Row + conn.execute("CREATE TABLE t (a TEXT, b INTEGER)") + conn.execute("INSERT INTO t VALUES ('hello', 42)") + row = conn.execute("SELECT * FROM t").fetchone() + # _dict_from_row is an instance method (no @staticmethod decorator) + db_inst = DatabaseProcessor.__new__(DatabaseProcessor) + result = db_inst._dict_from_row(row) + conn.close() + assert result == {"a": "hello", "b": 42} + + +# --- schema migration --- + +def test_schema_migration_runs_table_from_10_to_14(tmp_path): + """A legacy runs 
table with 10 columns should be migrated to 14 columns.""" + db_path = tmp_path / "legacy.db" + conn = sqlite3.connect(str(db_path)) + conn.execute(""" + CREATE TABLE runs ( + "run_start" TEXT, "full_name" TEXT, "name" TEXT, + "total" INTEGER, "passed" INTEGER, "failed" INTEGER, + "skipped" INTEGER, "elapsed_s" TEXT, "start_time" TEXT, "tags" TEXT, + UNIQUE(run_start, full_name) + ) + """) + conn.execute(""" + CREATE TABLE suites ( + "run_start" TEXT, "full_name" TEXT, "name" TEXT, + "total" INTEGER, "passed" INTEGER, "failed" INTEGER, + "skipped" INTEGER, "elapsed_s" TEXT, "start_time" TEXT + ) + """) + conn.execute(""" + CREATE TABLE tests ( + "run_start" TEXT, "full_name" TEXT, "name" TEXT, + "passed" INTEGER, "failed" INTEGER, "skipped" INTEGER, + "elapsed_s" TEXT, "start_time" TEXT, "message" TEXT + ) + """) + conn.execute(""" + CREATE TABLE keywords ( + "run_start" TEXT, "name" TEXT, "passed" INTEGER, "failed" INTEGER, + "skipped" INTEGER, "times_run" TEXT, "total_time_s" TEXT, + "average_time_s" TEXT, "min_time_s" TEXT, "max_time_s" TEXT + ) + """) + conn.commit() + conn.close() + + # Opening via DatabaseProcessor should trigger migration + db = DatabaseProcessor(str(db_path)) + db.open_database() + runs_cols = db.connection.cursor().execute("PRAGMA table_info(runs)").fetchall() + suites_cols = db.connection.cursor().execute("PRAGMA table_info(suites)").fetchall() + tests_cols = db.connection.cursor().execute("PRAGMA table_info(tests)").fetchall() + keywords_cols = db.connection.cursor().execute("PRAGMA table_info(keywords)").fetchall() + db.close_database() + + assert len(runs_cols) == 14 + assert len(suites_cols) == 11 + assert len(tests_cols) == 12 + assert len(keywords_cols) == 12 + + +# --- insert_output_data exception path --- + +def test_insert_output_data_exception_prints_error(db, capsys): + """insert_output_data catches exceptions from _insert_runs and prints an error.""" + from unittest.mock import patch + db.open_database() + fake_data = {"runs": 
[], "suites": [], "tests": [], "keywords": []} + with patch.object(db, "_insert_runs", side_effect=Exception("boom")): + db.insert_output_data(fake_data, [], "alias", "path.xml", None) + db.close_database() + captured = capsys.readouterr() + assert "ERROR" in captured.out + + +# --- get_data backward-compatibility branches --- + +def test_get_data_null_run_alias_generates_auto_alias(db): + """get_data() assigns 'Alias 0' when run_alias is NULL (pre-0.6.0 compat).""" + db.open_database() + db.connection.execute( + "INSERT INTO runs VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)", + ("2020-01-01 00:00:00+00:00", "Suite", "Suite", 1, 1, 0, 0, "1.0", + "2020-01-01", "tag", None, "/some/path.xml", "{}", None), + ) + db.connection.commit() + data = db.get_data() + db.close_database() + assert data["runs"][0]["run_alias"] == "Alias 1" + + +def test_get_data_empty_run_alias_generates_auto_alias(db): + """get_data() assigns 'Alias 1' when run_alias is '' (pre-0.6.0 compat).""" + db.open_database() + db.connection.execute( + "INSERT INTO runs VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)", + ("2020-01-01 00:00:00+00:00", "Suite", "Suite", 1, 1, 0, 0, "1.0", + "2020-01-01", "tag", "", "/some/path.xml", "{}", None), + ) + db.connection.commit() + data = db.get_data() + db.close_database() + assert data["runs"][0]["run_alias"] == "Alias 1" + + +def test_get_data_null_path_becomes_empty_string(db): + """get_data() replaces NULL path with '' (pre-0.8.1 compat).""" + db.open_database() + db.connection.execute( + "INSERT INTO runs VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)", + ("2020-01-01 00:00:00+00:00", "Suite", "Suite", 1, 1, 0, 0, "1.0", + "2020-01-01", "tag", "alias_np", None, "{}", None), + ) + db.connection.commit() + data = db.get_data() + db.close_database() + assert data["runs"][0]["path"] == "" + + +def test_get_data_null_suite_id(db): + """get_data() handles NULL suite id (pre-0.8.4 compat).""" + db.open_database() + db.connection.execute( + "INSERT INTO runs VALUES 
(?,?,?,?,?,?,?,?,?,?,?,?,?,?)", + ("2020-01-01 00:00:00+00:00", "Suite", "Suite", 1, 1, 0, 0, "1.0", + "2020-01-01", "tag", "alias_si", "/path.xml", "{}", None), + ) + db.connection.execute( + "INSERT INTO suites VALUES (?,?,?,?,?,?,?,?,?,?,?)", + ("2020-01-01 00:00:00+00:00", "Suite.Sub", "Sub", 1, 1, 0, 0, "0.5", + "2020-01-01", "alias_si", None), + ) + db.connection.commit() + data = db.get_data() + db.close_database() + assert len(data["suites"]) == 1 + + +def test_get_data_null_test_tags_and_id(db): + """get_data() handles NULL test tags and NULL test id (pre-0.8.4 compat).""" + db.open_database() + db.connection.execute( + "INSERT INTO runs VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)", + ("2020-01-01 00:00:00+00:00", "Suite", "Suite", 1, 1, 0, 0, "1.0", + "2020-01-01", "tag", "alias_ti", "/path.xml", "{}", None), + ) + db.connection.execute( + "INSERT INTO tests VALUES (?,?,?,?,?,?,?,?,?,?,?,?)", + ("2020-01-01 00:00:00+00:00", "Suite.Test", "Test", 1, 0, 0, "0.1", + "2020-01-01", "OK", None, "alias_ti", None), + ) + db.connection.commit() + data = db.get_data() + db.close_database() + assert len(data["tests"]) == 1 + assert data["tests"][0]["tags"] == "" + + +# --- remove_runs bare except branch --- + +def test_remove_runs_exception_branch_logs_error(populated_db): + """remove_runs() catches exceptions (e.g. index parse error) in its bare except.""" + populated_db.open_database() + # "index=not_a_number" matches the 'elif "index=" in run' branch but + # int("not_a_number") raises ValueError, which the bare except catches. 
+ console = populated_db.remove_runs(["index=not_a_number"]) + populated_db.close_database() + assert "ERROR" in console diff --git a/tests/test_dependencies.py b/tests/test_dependencies.py new file mode 100644 index 00000000..ba6ac938 --- /dev/null +++ b/tests/test_dependencies.py @@ -0,0 +1,105 @@ +import pytest +from robotframework_dashboard.dependencies import DependencyProcessor + + +# --- get_js_block --- + +def test_get_js_block_returns_script_tag(): + result = DependencyProcessor().get_js_block() + assert "