diff --git a/common/log_parser/generate_acs_summary.py b/common/log_parser/generate_acs_summary.py
index 556077a8..a26c6b7b 100644
--- a/common/log_parser/generate_acs_summary.py
+++ b/common/log_parser/generate_acs_summary.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2026, Arm Limited or its affiliates. All rights reserved.
+# Copyright (c) 2024-2026, Arm Limited or its affiliates. All rights reserved.
# SPDX-License-Identifier : Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/common/log_parser/main_log_parser.sh b/common/log_parser/main_log_parser.sh
index 4b44c9ba..602c3fe7 100755
--- a/common/log_parser/main_log_parser.sh
+++ b/common/log_parser/main_log_parser.sh
@@ -45,6 +45,7 @@ LOGS_PATH=$1
ACS_CONFIG_PATH=$2
SYSTEM_CONFIG_PATH=$3
WAIVER_JSON=$4
+POST_SCRIPT_LOG="$LOGS_PATH/post-script/post-script.log"
if [ $YOCTO_FLAG_PRESENT -eq 1 ]; then
test_category="/usr/bin/log_parser/test_categoryDT.json"
@@ -443,7 +444,6 @@ fi
# POST-SCRIPT LOG PARSING
################################################################################
if [ $YOCTO_FLAG_PRESENT -eq 1 ]; then
- POST_SCRIPT_LOG="$LOGS_PATH/post-script/post-script.log"
POST_SCRIPT_JSON="$JSONS_DIR/post_script.json"
# Attempt to parse post-script.log if it exists
@@ -605,6 +605,7 @@ fi
################################################################################
# OS TESTS PARSING
################################################################################
+OS_JSONS=()
if [ $YOCTO_FLAG_PRESENT -eq 1 ]; then
OS_LOGS_PATH="$(dirname "$LOGS_PATH")/os-logs"
OS_JSONS_DIR="$JSONS_DIR"
@@ -655,6 +656,37 @@ if [ $YOCTO_FLAG_PRESENT -eq 1 ]; then
fi
fi
+if [ $YOCTO_FLAG_PRESENT -eq 0 ]; then
+ OS_LOGS_PATH="$(dirname "$LOGS_PATH")/os-logs"
+ OS_JSONS_DIR="$JSONS_DIR"
+ mkdir -p "$OS_JSONS_DIR"
+ OS_JSONS=()
+
+ # SR band OS logs and post-script checks
+ SR_OS_LOGS_JSON="$OS_JSONS_DIR/os_test.json"
+ python3 "$SCRIPTS_PATH/os_tests/sr_logs_to_json.py" \
+ "$OS_LOGS_PATH" \
+ "$POST_SCRIPT_LOG" \
+ "$SR_OS_LOGS_JSON"
+ if [ $? -eq 0 ]; then
+ OS_JSONS+=("$SR_OS_LOGS_JSON")
+ apply_waivers "os Tests" "$SR_OS_LOGS_JSON"
+ OS_TESTS_PROCESSED=1
+ else
+ echo -e "${RED}ERROR: SR OS logs parsing to json failed.${NC}"
+ fi
+
+ if [ ${#OS_JSONS[@]} -gt 0 ]; then
+ OS_DETAILED_HTML="$HTMLS_DIR/os_tests_detailed.html"
+ OS_SUMMARY_HTML="$HTMLS_DIR/os_tests_summary.html"
+ python3 "$SCRIPTS_PATH/os_tests/json_to_html.py" \
+ "${OS_JSONS[@]}" \
+ "$OS_DETAILED_HTML" \
+ "$OS_SUMMARY_HTML" \
+ --include-drop-down
+ fi
+fi
+
################################################################################
# UEFI version
################################################################################
diff --git a/common/log_parser/merge_jsons.py b/common/log_parser/merge_jsons.py
index 20405085..4bc74cdc 100755
--- a/common/log_parser/merge_jsons.py
+++ b/common/log_parser/merge_jsons.py
@@ -76,6 +76,7 @@
("SCT", "M"),
("FWTS", "M"),
("BSA", "M"),
+ ("OS_TEST", "M"),
("BBSR-SCT", "EM"),
("BBSR-FWTS", "EM"),
("BBSR-TPM", "EM"),
@@ -362,6 +363,10 @@ def merge_json_files(json_files, output_file):
elif os.path.basename(json_path).lower() == "ethtool_test.json":
section_name = "Suite_Name: Ethtool Test"
suite_key = "ETHTOOL_TEST"
+ elif os.path.basename(json_path).lower() == "os_test.json":
+ section_name = "Suite_Name: OS Tests"
+ suite_key = "OS_TEST"
+ _REQUIREMENT_MAP["OS_TEST"] = "M"
elif "ethtool_test" in fn.lower():
base_name_no_ext = os.path.splitext(os.path.basename(json_path))[0]
section_name = f"Suite_Name: OS Tests - {base_name_no_ext}"
diff --git a/common/log_parser/os_tests/json_to_html.py b/common/log_parser/os_tests/json_to_html.py
index dd5b6099..5edfa629 100644
--- a/common/log_parser/os_tests/json_to_html.py
+++ b/common/log_parser/os_tests/json_to_html.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright (c) 2024-2025, Arm Limited or its affiliates. All rights reserved.
+# Copyright (c) 2024-2026, Arm Limited or its affiliates. All rights reserved.
# SPDX-License-Identifier : Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -51,14 +51,26 @@ def detect_columns_used(subtests):
}
# Function to generate bar chart for test results
-def generate_bar_chart(suite_summary):
- labels = ['Passed', 'Failed', 'Skipped']
- sizes = [
- suite_summary.get('total_passed', 0),
- suite_summary.get('total_failed', 0),
- suite_summary.get('total_skipped', 0)
- ]
- colors = ['#66bb6a', '#ef5350', '#f39c12']
+def generate_bar_chart(suite_summary, show_extended=False):
+ if show_extended:
+ labels = ['Passed', 'Failed', 'Failed w/ Waiver', 'Aborted', 'Skipped', 'Warnings']
+ sizes = [
+ suite_summary.get('total_passed', 0),
+ suite_summary.get('total_failed', 0),
+ suite_summary.get('total_failed_with_waiver', 0),
+ suite_summary.get('total_aborted', 0),
+ suite_summary.get('total_skipped', 0),
+ suite_summary.get('total_warnings', 0)
+ ]
+ colors = ['#66bb6a', '#ef5350', '#f39c12', '#95a5a6', '#f1c40f', '#f39c12']
+ else:
+ labels = ['Passed', 'Failed', 'Skipped']
+ sizes = [
+ suite_summary.get('total_passed', 0),
+ suite_summary.get('total_failed', 0),
+ suite_summary.get('total_skipped', 0)
+ ]
+ colors = ['#66bb6a', '#ef5350', '#f39c12']
plt.figure(figsize=(8, 6))
bars = plt.bar(labels, sizes, color=colors, edgecolor='black')
@@ -115,7 +127,7 @@ def get_subtest_status(subtest_result):
return 'INFO' # For informational entries
# Function to generate HTML content for both summary and detailed pages
-def generate_html(suite_summary, test_results_list, output_html_path, is_summary_page=True, include_drop_down=False):
+def generate_html(suite_summary, test_results_list, output_html_path, is_summary_page=True, include_drop_down=False, show_extended_summary=False):
# Set the test suite name to 'OS Tests'
test_suite_name = 'OS Tests'
@@ -273,10 +285,26 @@ def generate_html(suite_summary, test_results_list, output_html_path, is_summary
Failed |
{{ total_failed }} |
+ {% if show_extended_summary %}
+
+ | Failed with Waiver |
+ {{ total_failed_with_waiver }} |
+
+
+ | Aborted |
+ {{ total_aborted }} |
+
+ {% endif %}
| Skipped |
{{ total_skipped }} |
+ {% if show_extended_summary %}
+
+ | Warnings |
+ {{ total_warnings }} |
+
+ {% endif %}
@@ -342,6 +370,11 @@ def generate_html(suite_summary, test_results_list, output_html_path, is_summary
{% set _ = all_reasons.append(reason) %}
{% endfor %}
{% endif %}
+ {% if subtest.sub_test_result.warning_reasons %}
+ {% for reason in subtest.sub_test_result.warning_reasons %}
+ {% set _ = all_reasons.append(reason) %}
+ {% endfor %}
+ {% endif %}
{% if subtest.sub_test_result.skip_reasons %}
{% for reason in subtest.sub_test_result.skip_reasons %}
{% set _ = all_reasons.append(reason) %}
@@ -367,11 +400,21 @@ def generate_html(suite_summary, test_results_list, output_html_path, is_summary
""")
# Calculate total tests
- total_tests = suite_summary.get('total_passed', 0) + suite_summary.get('total_failed', 0) + suite_summary.get('total_skipped', 0)
+ total_tests = (
+ suite_summary.get('total_passed', 0)
+ + suite_summary.get('total_failed', 0)
+ + suite_summary.get('total_skipped', 0)
+ )
+ if show_extended_summary:
+ total_tests += (
+ suite_summary.get('total_aborted', 0)
+ + suite_summary.get('total_failed_with_waiver', 0)
+ + suite_summary.get('total_warnings', 0)
+ )
# If not summary page, generate chart data
if not is_summary_page:
- chart_data = generate_bar_chart(suite_summary)
+ chart_data = generate_bar_chart(suite_summary, show_extended_summary)
else:
chart_data = None # No chart data for summary page
@@ -382,9 +425,13 @@ def generate_html(suite_summary, test_results_list, output_html_path, is_summary
total_passed=suite_summary.get("total_passed", 0),
total_failed=suite_summary.get("total_failed", 0),
total_skipped=suite_summary.get("total_skipped", 0),
+ total_failed_with_waiver=suite_summary.get("total_failed_with_waiver", 0),
+ total_aborted=suite_summary.get("total_aborted", 0),
+ total_warnings=suite_summary.get("total_warnings", 0),
test_results_list=test_results_list,
is_summary_page=is_summary_page,
include_drop_down=include_drop_down,
+ show_extended_summary=show_extended_summary,
chart_data=chart_data, # Will be None if is_summary_page is True
enumerate=enumerate,
get_subtest_status=get_subtest_status # Pass the function to the template
@@ -408,9 +455,17 @@ def main():
total_passed = 0
total_failed = 0
total_skipped = 0
+ total_aborted = 0
+ total_warnings = 0
+ total_failed_with_waiver = 0
boot_sources_paths = args.boot_sources_paths if args.boot_sources_paths else []
+ sr_single_mode = (
+ len(args.input_json_files) == 1
+ and os.path.basename(args.input_json_files[0]).lower() == "os_test.json"
+ )
+
for idx, input_json_file in enumerate(args.input_json_files):
with open(input_json_file, 'r') as json_file:
try:
@@ -422,67 +477,89 @@ def main():
test_results = data.get("test_results", [])
os_name = data.get("os_name", "Unknown")
if test_results:
- if idx < len(boot_sources_paths):
- boot_sources_path = boot_sources_paths[idx]
- else:
- boot_sources_path = "Unknown"
-
- if os_name == "Unknown" and boot_sources_path != "Unknown":
- # Try to extract OS name from the boot_sources_path
- os_name = boot_sources_path.split('/')[-2]
-
- # Insert the Boot Sources test
- boot_sources_test = {
- "Test_suite_name": "Boot Sources",
- "Test_suite_description": "Check for boot sources",
- "Test_case": f"Boot Sources for {os_name}",
- "Test_case_description": f"Please review the boot source OS logs for {os_name} - path of {boot_sources_path}",
- "subtests": [],
- "is_boot_source": True
- }
- test_results.append(boot_sources_test)
+ is_sr_os_logs = os.path.basename(input_json_file).lower() == "os_test.json"
+ if not is_sr_os_logs:
+ if idx < len(boot_sources_paths):
+ boot_sources_path = boot_sources_paths[idx]
+ else:
+ boot_sources_path = "Unknown"
+
+ if os_name == "Unknown" and boot_sources_path != "Unknown":
+ # Try to extract OS name from the boot_sources_path
+ os_name = boot_sources_path.split('/')[-2]
+
+ # Insert the Boot Sources test
+ boot_sources_test = {
+ "Test_suite_name": "Boot Sources",
+ "Test_suite_description": "Check for boot sources",
+ "Test_case": f"Boot Sources for {os_name}",
+ "Test_case_description": f"Please review the boot source OS logs for {os_name} - path of {boot_sources_path}",
+ "subtests": [],
+ "is_boot_source": True
+ }
+ test_results.append(boot_sources_test)
+
+ if sr_single_mode and is_sr_os_logs:
+ suite_summary_data = data.get("suite_summary", {})
+ total_passed = suite_summary_data.get("total_passed", 0)
+ total_failed = suite_summary_data.get("total_failed", 0)
+ total_skipped = suite_summary_data.get("total_skipped", 0)
+ total_aborted = suite_summary_data.get("total_aborted", 0)
+ total_warnings = suite_summary_data.get("total_warnings", 0)
+ total_failed_with_waiver = suite_summary_data.get("total_failed_with_waiver", 0)
+ total_tests = (
+ total_passed
+ + total_failed
+ + total_skipped
+ + total_aborted
+ + total_failed_with_waiver
+ + total_warnings
+ )
# Tally pass/fail/skip
- for test in test_results:
- if test.get('is_boot_source'):
- continue
-
- total_tests += 1
- test_status = 'PASSED'
- has_skipped = False
- has_pass = False
-
- if test.get('subtests'):
- for subtest in test['subtests']:
- subtest_status = get_subtest_status(subtest['sub_test_result'])
- if subtest_status == 'FAILED':
- test_status = 'FAILED'
- break
- elif subtest_status == 'SKIPPED':
- has_skipped = True
- elif subtest_status not in ('PASSED', 'SKIPPED'):
- # treat any other status as failure
- test_status = 'FAILED'
- break
- elif subtest_status == 'PASSED':
- has_pass = True
+ if not (sr_single_mode and is_sr_os_logs):
+ for test in test_results:
+ if test.get('is_boot_source'):
+ continue
+
+ total_tests += 1
+ test_status = 'PASSED'
+ has_skipped = False
+ has_pass = False
+
+ if test.get('subtests'):
+ for subtest in test['subtests']:
+ subtest_status = get_subtest_status(subtest['sub_test_result'])
+ if subtest_status == 'FAILED':
+ test_status = 'FAILED'
+ break
+ elif subtest_status == 'SKIPPED':
+ has_skipped = True
+ elif subtest_status not in ('PASSED', 'SKIPPED'):
+ # treat any other status as failure
+ test_status = 'FAILED'
+ break
+ elif subtest_status == 'PASSED':
+ has_pass = True
+ else:
+ if test_status != 'FAILED':
+ test_status = 'PASSED' if has_pass else 'SKIPPED'
else:
- if test_status != 'FAILED':
- test_status = 'PASSED' if has_pass else 'SKIPPED'
- else:
- test_status = 'SKIPPED'
+ test_status = 'SKIPPED'
- if test_status == 'PASSED':
- total_passed += 1
- elif test_status == 'FAILED':
- total_failed += 1
- else: # 'SKIPPED' or fallback
- total_skipped += 1
+ if test_status == 'PASSED':
+ total_passed += 1
+ elif test_status == 'FAILED':
+ total_failed += 1
+ else: # 'SKIPPED' or fallback
+ total_skipped += 1
#
# For each test, figure out which columns to show
#
for t in test_results:
+ if not t.get("Test_suite_name") and t.get("Test_suite"):
+ t["Test_suite_name"] = t.get("Test_suite")
subtests = t.get("subtests", [])
t["columns_used"] = detect_columns_used(subtests)
@@ -493,6 +570,9 @@ def main():
'total_passed': total_passed,
'total_failed': total_failed,
'total_skipped': total_skipped,
+ 'total_aborted': total_aborted,
+ 'total_warnings': total_warnings,
+ 'total_failed_with_waiver': total_failed_with_waiver
}
if total_tests == 0:
@@ -505,7 +585,8 @@ def main():
test_results_list,
args.detailed_html_file,
is_summary_page=False,
- include_drop_down=args.include_drop_down
+ include_drop_down=args.include_drop_down,
+ show_extended_summary=sr_single_mode
)
# Generate the summary page
@@ -513,7 +594,8 @@ def main():
suite_summary,
test_results_list,
args.summary_html_file,
- is_summary_page=True
+ is_summary_page=True,
+ show_extended_summary=sr_single_mode
)
if __name__ == "__main__":
diff --git a/common/log_parser/os_tests/sr_logs_to_json.py b/common/log_parser/os_tests/sr_logs_to_json.py
new file mode 100644
index 00000000..2f1219da
--- /dev/null
+++ b/common/log_parser/os_tests/sr_logs_to_json.py
@@ -0,0 +1,227 @@
+#!/usr/bin/env python3
+# Copyright (c) 2026, Arm Limited or its affiliates. All rights reserved.
+# SPDX-License-Identifier : Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+import sys
+
+OS_RELEASE_FILE_NAME = "cat-etc-os-release.txt"
+
+def create_subtest(subtest_number, description, status, reason=""):
+ result = {
+ "sub_Test_Number": str(subtest_number),
+ "sub_Test_Description": description,
+ "sub_test_result": {
+ "PASSED": 1 if status == "PASSED" else 0,
+ "FAILED": 1 if status == "FAILED" else 0,
+ "FAILED_WITH_WAIVER": 0,
+ "ABORTED": 0,
+ "SKIPPED": 1 if status == "SKIPPED" else 0,
+ "WARNINGS": 1 if status == "WARNINGS" else 0,
+ "pass_reasons": [reason] if (status == "PASSED" and reason) else [],
+ "fail_reasons": [reason] if (status == "FAILED" and reason) else [],
+ "abort_reasons": [],
+ "skip_reasons": [reason] if (status == "SKIPPED" and reason) else [],
+ "warning_reasons": [reason] if (status == "WARNINGS" and reason) else [],
+ "waiver_reason": ""
+ }
+ }
+ return result
+
+def update_suite_summary(suite_summary, status):
+ key_map = {
+ "PASSED": "total_passed",
+ "FAILED": "total_failed",
+ "SKIPPED": "total_skipped",
+ "ABORTED": "total_aborted",
+ "WARNINGS": "total_warnings"
+ }
+ if status in key_map:
+ suite_summary[key_map[status]] += 1
+
+def collect_os_release_files(os_logs_path):
+ release_files = []
+ if not os.path.isdir(os_logs_path):
+ return release_files
+ for root, _, files in os.walk(os_logs_path):
+ if OS_RELEASE_FILE_NAME in files:
+ release_files.append(os.path.join(root, OS_RELEASE_FILE_NAME))
+ return release_files
+
+def os_dir_from_release_path(os_logs_path, release_path):
+ try:
+ rel_path = os.path.relpath(release_path, os_logs_path)
+ except ValueError:
+ return None
+ parts = rel_path.split(os.sep)
+ return parts[0] if parts else None
+
+def parse_os_release(os_release_path):
+ name = None
+ version_id = None
+ try:
+        with open(os_release_path, "r", encoding="utf-8", errors="replace") as handle:
+ for line in handle:
+ line = line.strip()
+ if line.startswith("NAME=") and name is None:
+ name = line.split("=", 1)[1].strip().strip('"').strip("'")
+ elif line.startswith("VERSION_ID=") and version_id is None:
+ version_id = line.split("=", 1)[1].strip().strip('"').strip("'")
+ if name and version_id:
+ break
+ except OSError:
+ return None, None
+ return name, version_id
+
+def parse_post_script_errors(log_path, tokens):
+ errors = []
+ if not os.path.isfile(log_path):
+ return errors
+ tokens_lower = [t.lower() for t in tokens if t]
+    with open(log_path, "r", encoding="utf-8", errors="replace") as handle:
+ for line in handle:
+ if not line.startswith("ERROR"):
+ continue
+ lower_line = line.lower()
+ if any(token in lower_line for token in tokens_lower):
+ errors.append(line.strip())
+ return errors
+
+def build_results(os_logs_path, post_script_log):
+ suite_summary = {
+ "total_passed": 0,
+ "total_failed": 0,
+ "total_skipped": 0,
+ "total_aborted": 0,
+ "total_warnings": 0,
+ "total_failed_with_waivers": 0
+ }
+
+ test_suite = {
+ "Test_suite": "os_test",
+ "Test_suite_description": "os test checks",
+ "Test_case": "os_testing",
+ "Test_case_description": "OS logs validation and post script checks",
+ "subtests": [],
+ "test_suite_summary": suite_summary.copy()
+ }
+
+ subtest_number = 1
+
+ rhel_info = None
+ sle_info = None
+ all_os_dirs = set()
+
+ for release_path in collect_os_release_files(os_logs_path):
+ name, version_id = parse_os_release(release_path)
+ if not name or not version_id:
+ continue
+ os_dir = os_dir_from_release_path(os_logs_path, release_path)
+ if os_dir:
+ all_os_dirs.add(os_dir)
+ name_lower = name.lower()
+ if rhel_info is None and ("red hat" in name_lower or "redhat" in name_lower):
+ rhel_info = (name, version_id, release_path, os_dir)
+ if sle_info is None and ("sles" in name_lower or "suse" in name_lower):
+ sle_info = (name, version_id, release_path, os_dir)
+ if rhel_info and sle_info:
+ break
+
+ def add_presence_subtest(label, os_info):
+ nonlocal subtest_number
+ desc = f"Is {label.lower()} logs present or not"
+ if not os_info:
+ sub = create_subtest(subtest_number, desc, "FAILED", "OS logs missing")
+ else:
+ name, version_id, _, _ = os_info
+ reason = f"{name} {version_id}"
+ sub = create_subtest(subtest_number, desc, "PASSED", reason)
+ test_suite["subtests"].append(sub)
+ update_suite_summary(test_suite["test_suite_summary"], "FAILED" if sub["sub_test_result"]["FAILED"] else "PASSED")
+ subtest_number += 1
+
+ add_presence_subtest("RHEL", rhel_info)
+ add_presence_subtest("SLE", sle_info)
+
+ errors = parse_post_script_errors(post_script_log, ["os-logs"])
+ if not os.path.isfile(post_script_log):
+ desc = f"post-script.log not found at {post_script_log}"
+ sub = create_subtest(subtest_number, desc, "FAILED", "post-script.log missing")
+ test_suite["subtests"].append(sub)
+ update_suite_summary(test_suite["test_suite_summary"], "FAILED")
+ subtest_number += 1
+ elif errors:
+        rhel_dir = rhel_info[3] if rhel_info else None
+        sle_dir = sle_info[3] if sle_info else None
+        tracked_dirs = [d.lower() for d in (rhel_dir, sle_dir) if d]
+        for error_line in errors:
+            cleaned = error_line.strip()
+            if cleaned.startswith("ERROR"):
+                cleaned = cleaned[len("ERROR"):].strip()
+            reason = "post-script error"
+            if ":" in cleaned:
+                desc_part, reason_part = cleaned.rsplit(":", 1)
+                desc = f"post-script checks:{desc_part.strip()}"
+                reason = reason_part.strip()
+            else:
+                desc = f"post-script checks:{cleaned}"
+            lower_line = error_line.lower()
+            # Errors naming a tracked RHEL/SLE log dir are failures; all
+            # others are warnings (previous elif/else arms were identical).
+            if any(d in lower_line for d in tracked_dirs):
+                status = "FAILED"
+            else:
+                status = "WARNINGS"
+ sub = create_subtest(subtest_number, desc, status, reason)
+ test_suite["subtests"].append(sub)
+ update_suite_summary(test_suite["test_suite_summary"], status)
+ subtest_number += 1
+ else:
+ desc = "No post-script errors found for OS logs"
+ sub = create_subtest(subtest_number, desc, "PASSED")
+ test_suite["subtests"].append(sub)
+ update_suite_summary(test_suite["test_suite_summary"], "PASSED")
+ subtest_number += 1
+
+    suite_summary = {
+        "total_passed": test_suite["test_suite_summary"]["total_passed"],
+        "total_failed": test_suite["test_suite_summary"]["total_failed"],
+        "total_failed_with_waiver": test_suite["test_suite_summary"]["total_failed_with_waivers"],
+        "total_aborted": test_suite["test_suite_summary"]["total_aborted"],
+        "total_skipped": test_suite["test_suite_summary"]["total_skipped"],
+        "total_warnings": test_suite["test_suite_summary"]["total_warnings"]
+    }
+
+ return {
+ "test_results": [test_suite],
+ "suite_summary": suite_summary
+ }
+
+def main():
+ if len(sys.argv) != 4:
+ print(f"Usage: {sys.argv[0]}