Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 0 additions & 8 deletions integtest/disabled_output_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@
number_of_data_producers = 2
run_duration = 20 # seconds
trigger_rate = 1.0 # Hz
data_rate_slowdown_factor = 1

# Default values for validation parameters
expected_number_of_data_files = 2
Expand Down Expand Up @@ -95,13 +94,6 @@
# which is the data file that is used to emulated the data. The current default for that field
# specifies a set of WIBEth frames from a relatively recent run at EHN1.)

conf_dict.config_substitutions.append(
data_classes.attribute_substitution(
obj_id=conf_dict.session,
obj_class="Session",
updates={"data_rate_slowdown_factor": data_rate_slowdown_factor},
)
)
conf_dict.config_substitutions.append(
data_classes.attribute_substitution(
obj_class="RandomTCMakerConf",
Expand Down
69 changes: 19 additions & 50 deletions integtest/max_file_size_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
import integrationtest.log_file_checks as log_file_checks
import integrationtest.data_classes as data_classes
import integrationtest.resource_validation as resource_validation
from integrationtest.get_pytest_tmpdir import get_pytest_tmpdir

pytest_plugins = "integrationtest.integrationtest_drunc"

Expand All @@ -17,7 +18,6 @@
# Values that help determine the running conditions
number_of_data_producers = 2
number_of_readout_apps = 3
data_rate_slowdown_factor = 1

# Default values for validation parameters
check_for_logfile_errors = True
Expand All @@ -33,7 +33,8 @@
"fragment_type_description": "TP Stream",
"fragment_type": "Trigger_Primitive",
"expected_fragment_count": number_of_readout_apps * 3,
"frag_counts_by_record_ordinal": {"first": {"min_count": 1, "max_count": number_of_readout_apps * 3},
"frag_counts_by_record_ordinal": { "first": {"min_count": 1, "max_count": number_of_readout_apps * 3},
"last": {"min_count": 1, "max_count": number_of_readout_apps * 3},
"default": {"min_count": number_of_readout_apps * 3, "max_count": number_of_readout_apps * 3} },
"min_size_bytes": 0, # not checked
"max_size_bytes": 0, # not checked
Expand Down Expand Up @@ -88,14 +89,14 @@
}

# Determine if the conditions are right for these tests
resval = resource_validation.ResourceValidator()
resval.require_cpu_count(20) # number of data sources (6) times 3 threads each plus a couple more for everything else
resval.require_free_memory_gb(12) # the maximum amount that we observe being used ('free -h')
resval.require_total_memory_gb(24) # double what we need; trying to be kind to others
actual_output_path = "/tmp"
resval.require_free_disk_space_gb(actual_output_path, 5) # approximately what we use
resval.require_total_disk_space_gb(actual_output_path, 10) # factor of two to reserve some for others
resval_debug_string = resval.get_debug_string()
resource_validator = resource_validation.ResourceValidator()
resource_validator.cpu_count_needs(15, 30) # 2 for each data source (6) plus 3 more for everything else
resource_validator.free_memory_needs(15, 30) # 25% more than what we observe being used ('free -h')
resource_validator.total_memory_needs() # no specific request, but it's useful to see how much is available
actual_output_path = get_pytest_tmpdir()
resource_validator.free_disk_space_needs(actual_output_path, 6, 10) # 20% more than what we need
resource_validator.total_disk_space_needs(actual_output_path, recommended_total_disk_space=15) # double what we need
resval_debug_string = resource_validator.get_debug_string()
print(f"{resval_debug_string}")

# The next three variable declarations *must* be present as globals in the test
Expand All @@ -115,13 +116,6 @@
"asset://?checksum=dd156b4895f1b06a06b6ff38e37bd798" # WIBEth All Zeros
)

conf_dict.config_substitutions.append(
data_classes.attribute_substitution(
obj_id=conf_dict.session,
obj_class="Session",
updates={"data_rate_slowdown_factor": data_rate_slowdown_factor},
)
)
conf_dict.config_substitutions.append(
data_classes.attribute_substitution(
obj_class="LatencyBuffer", updates={"size": 200000}
Expand Down Expand Up @@ -207,28 +201,19 @@
}

# The commands to run in dunerc, as a list
if resval.this_computer_has_sufficient_resources:
dunerc_command_list = (
"boot conf wait 5".split()
+ "start --run-number 101 wait 1 enable-triggers wait 178".split()
+ "disable-triggers wait 2 drain-dataflow wait 2 stop-trigger-sources stop ".split()
+ "start --run-number 102 wait 1 enable-triggers wait 128".split()
+ "disable-triggers wait 2 drain-dataflow wait 2 stop-trigger-sources stop ".split()
+ " scrap terminate".split()
)
else:
dunerc_command_list = ["wait", "1"]
dunerc_command_list = (
"boot conf wait 5".split()
+ "start --run-number 101 wait 1 enable-triggers wait 178".split()
+ "disable-triggers wait 2 drain-dataflow wait 2 stop-trigger-sources stop ".split()
+ "start --run-number 102 wait 1 enable-triggers wait 128".split()
+ "disable-triggers wait 2 drain-dataflow wait 2 stop-trigger-sources stop ".split()
+ " scrap terminate".split()
)

# The tests themselves


def test_dunerc_success(run_dunerc):
if not resval.this_computer_has_sufficient_resources:
resval_report_string = resval.get_insufficient_resources_report()
print(f"{resval_report_string}")
resval_summary_string = resval.get_insufficient_resources_summary()
pytest.skip(f"{resval_summary_string}")

# print the name of the current test
current_test = os.environ.get("PYTEST_CURRENT_TEST")
match_obj = re.search(r".*\[(.+)-run_.*rc.*\d].*", current_test)
Expand All @@ -244,10 +229,6 @@ def test_dunerc_success(run_dunerc):


def test_log_files(run_dunerc):
if not resval.this_computer_has_sufficient_resources:
resval_summary_string = resval.get_insufficient_resources_summary()
pytest.skip(f"{resval_summary_string}")

if check_for_logfile_errors:
# Check that there are no warnings or errors in the log files
assert log_file_checks.logs_are_error_free(
Expand All @@ -256,10 +237,6 @@ def test_log_files(run_dunerc):


def test_data_files(run_dunerc):
if not resval.this_computer_has_sufficient_resources:
resval_summary_string = resval.get_insufficient_resources_summary()
pytest.skip(f"{resval_summary_string}")

fragment_check_list = [triggercandidate_frag_params, hsi_frag_params, wibeth_frag_params]
fragment_check_list.append(triggerprimitive_frag_params)
fragment_check_list.append(triggeractivity_frag_params)
Expand Down Expand Up @@ -287,10 +264,6 @@ def test_data_files(run_dunerc):


def test_tpstream_files(run_dunerc):
if not resval.this_computer_has_sufficient_resources:
resval_summary_string = resval.get_insufficient_resources_summary()
pytest.skip(f"{resval_summary_string}")

tpstream_files = run_dunerc.tpset_files
fragment_check_list = [wibeth_tpset_params] # WIBEth

Expand All @@ -312,10 +285,6 @@ def test_tpstream_files(run_dunerc):


def test_cleanup(run_dunerc):
if not resval.this_computer_has_sufficient_resources:
resval_summary_string = resval.get_insufficient_resources_summary()
pytest.skip(f"{resval_summary_string}")

pathlist_string = ""
filelist_string = ""
for data_file in run_dunerc.data_files:
Expand Down
8 changes: 0 additions & 8 deletions integtest/offline_prod_run_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@

# Values that help determine the running conditions
number_of_data_producers = 1
data_rate_slowdown_factor = 1 # 10 for ProtoWIB/DuneWIB
run_duration = 5 # seconds

# Default values for validation parameters
Expand Down Expand Up @@ -69,13 +68,6 @@
conf_dict.tpg_enabled = False
conf_dict.fake_hsi_enabled = True

conf_dict.config_substitutions.append(
data_classes.attribute_substitution(
obj_id=conf_dict.session,
obj_class="Session",
updates={"data_rate_slowdown_factor": data_rate_slowdown_factor},
)
)
conf_dict.config_substitutions.append(
data_classes.attribute_substitution(obj_class="LatencyBuffer", updates={"size": 50000})
)
Expand Down
1 change: 0 additions & 1 deletion integtest/trmonrequestor_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@
number_of_dataflow_apps = 2
run_duration = 20 # seconds
trigger_rate = 1.0 # Hz
data_rate_slowdown_factor = 1
trmon_prescale = 3

# Default values for validation parameters
Expand Down
198 changes: 4 additions & 194 deletions scripts/dfmodules_integtest_bundle.sh
Original file line number Diff line number Diff line change
@@ -1,197 +1,7 @@
#!/bin/bash
# 29-Apr-2025, KAB

# Ordered list of the dfmodules integration tests this script can run; the
# zero-based position in this array is the index that the -f/-l options select.
integtest_list=( "multiple_data_writers_test.py" "insufficient_disk_space_test.py" "large_trigger_record_test.py" "hdf5_compression_test.py" "disabled_output_test.py" "max_file_size_test.py" "trmonrequestor_test.py" "offline_prod_run_test.py" )
# Index of the last entry (array length minus one); the default for -l.
let last_test_index=${#integtest_list[@]}-1
echo ""
echo "Redirecting to dunedaq_integtest_bundle.sh..."
echo ""

# Prints the command-line usage summary, then the indexed list of available
# tests so the user can choose -f/-l index values by eye.
usage() {
declare -r script_name=$(basename "$0")
echo """
Usage:
"${script_name}" [option(s)]

Options:
-h, --help : prints out usage information
-f <zero-based index of the first test to be run, default=0>
-l <zero-based index of the last test to be run, default=${last_test_index}>
-k <pipe-delimited string to select which tests will be run ('egrep -i' match to test name)>
-n <number of times to run each individual test, default=1>
-N <number of times to run the full set of selected tests, default=1>
--stop-on-failure : causes the script to stop when one of the integtests reports a failure
--stop-on-skip : causes the script to stop when one of the integtests skips a test
"""
# Enumerate the tests with the zero-based indices used by -f/-l.
let counter=0
echo "List of available tests:"
for tst in ${integtest_list[@]}; do
echo " ${counter}: $tst"
let counter=${counter}+1
done
echo ""
}

# Removes the ANSI characters associated with formatting, including color coding and font styling
# Stdin is passed through to stdout unchanged; only the copy appended to the
# log file "$1" (via the >(...) process substitution) has the ESC[...m
# escape sequences stripped, so the on-screen output keeps its colors.
CaptureOutputNoANSI() {
tee -a >(sed -u 's/\x1b\[[0-9;]*m//g' >> "$1")
}
# Captures the output to the specified file, without changing the output
# Stdin is echoed to stdout and appended, as-is, to the log file named in $1.
CaptureOutput() {
# Quote "$1" so a log-file path containing spaces or glob characters is
# passed to tee as one argument instead of being word-split/expanded.
tee -a "$1"
}

# Parse the command line with getopt(1) so that long options are supported.
# NOTE(review): the short-option string declares "s:" but no "-s" case is
# handled below -- looks like a leftover from an earlier option; confirm.
TEMP=`getopt -o hs:f:l:k:n:N: --long help,stop-on-failure,stop-on-skip -- "$@"`
eval set -- "$TEMP"

# Defaults for the user-settable options (documented in usage()).
let first_test_index=0
let individual_test_requested_iterations=1
let full_set_requested_interations=1
let stop_on_failure=0
let stop_on_skip=0
requested_test_names=

# Consume the normalized option list; "--" marks the end of options.
while true; do
case "$1" in
-h|--help)
usage
exit 0
;;
-f)
let first_test_index=$2
shift 2
;;
-l)
let last_test_index=$2
shift 2
;;
-k)
requested_test_names=$2
shift 2
;;
-n)
let individual_test_requested_iterations=$2
shift 2
;;
-N)
let full_set_requested_interations=$2
shift 2
;;
--stop-on-failure)
let stop_on_failure=1
shift
;;
--stop-on-skip)
let stop_on_skip=1
shift
;;
--)
shift
break
;;
esac
done

# check if the numad daemon is running
# (numad periodically moves processes between cores/NUMA nodes, which can
# destabilize the resource-intensive tests -- see the matching warning at the
# end of this script -- so shout early and give the user a moment to abort)
numad_grep_output=`ps -ef | grep numad | grep -v grep`
if [[ "${numad_grep_output}" != "" ]]; then
echo "*********************************************************************"
echo "*** DANGER, DANGER, 'numad' appears to be running on this computer!"
echo "*** 'ps' output: ${numad_grep_output}"
echo "*** <ctrl-c> now if you want to abort this testing."
echo "*********************************************************************"
sleep 3
fi

# other setup
# Timestamped log file under the per-user pytest temp area.
TIMESTAMP=`date '+%Y%m%d%H%M%S'`
mkdir -p /tmp/pytest-of-${USER}
ITGRUNNER_LOG_FILE="/tmp/pytest-of-${USER}/dfmodules_integtest_bundle_${TIMESTAMP}.log"

# Dry-run pass over the test list applying the same -f/-l index window and
# -k name filter as the real run below, only to count how many individual
# tests are selected (for the "test X of Y" progress messages).
let number_of_individual_tests=0
let test_index=0
for TEST_NAME in ${integtest_list[@]}; do
if [[ ${test_index} -ge ${first_test_index} && ${test_index} -le ${last_test_index} ]]; then
requested_test=`echo ${TEST_NAME} | egrep -i ${requested_test_names:-${TEST_NAME}}`
if [[ "${requested_test}" != "" ]]; then
let number_of_individual_tests=${number_of_individual_tests}+1
fi
fi
let test_index=${test_index}+1
done
let total_number_of_tests=${number_of_individual_tests}*${individual_test_requested_iterations}*${full_set_requested_interations}

# run the tests
# Loop nesting: full-set repetitions (-N) > test list (-f/-l/-k window) >
# per-test repetitions (-n). "break 3" below exits all three loops at once
# when a stop-on-failure / stop-on-skip condition fires.
let overall_test_index=0 # this is only used for user feedback
let full_set_loop_count=0
while [[ ${full_set_loop_count} -lt ${full_set_requested_interations} ]]; do
let test_index=0
for TEST_NAME in ${integtest_list[@]}; do
if [[ ${test_index} -ge ${first_test_index} && ${test_index} -le ${last_test_index} ]]; then
requested_test=`echo ${TEST_NAME} | egrep -i ${requested_test_names:-${TEST_NAME}}`
if [[ "${requested_test}" != "" ]]; then
let individual_loop_count=0
while [[ ${individual_loop_count} -lt ${individual_test_requested_iterations} ]]; do
let overall_test_index=${overall_test_index}+1
echo ""
echo -e "\U0001F535 \033[0;34mStarting test ${overall_test_index} of ${total_number_of_tests}...\033[0m \U0001F535" | CaptureOutput ${ITGRUNNER_LOG_FILE}

echo -e "\u2B95 \033[0;1mRunning ${TEST_NAME}\033[0m \u2B05" | CaptureOutput ${ITGRUNNER_LOG_FILE}
# Locate the test: prefer a copy in the current directory, then the
# source-code work area (with no:cacheprovider when that area is
# read-only), and finally the installed share area.
if [[ -e "./${TEST_NAME}" ]]; then
pytest -s ./${TEST_NAME} | CaptureOutputNoANSI ${ITGRUNNER_LOG_FILE}
elif [[ -e "${DBT_AREA_ROOT}/sourcecode/dfmodules/integtest/${TEST_NAME}" ]]; then
if [[ -w "${DBT_AREA_ROOT}" ]]; then
pytest -s ${DBT_AREA_ROOT}/sourcecode/dfmodules/integtest/${TEST_NAME} | CaptureOutputNoANSI ${ITGRUNNER_LOG_FILE}
else
pytest -s -p no:cacheprovider ${DBT_AREA_ROOT}/sourcecode/dfmodules/integtest/${TEST_NAME} | CaptureOutputNoANSI ${ITGRUNNER_LOG_FILE}
fi
else
pytest -s -p no:cacheprovider ${DFMODULES_SHARE}/integtest/${TEST_NAME} | CaptureOutputNoANSI ${ITGRUNNER_LOG_FILE}
fi
# PIPESTATUS[0] is pytest's exit code; the last pipeline element is
# the capture function, whose status would mask it.
let pytest_return_code=${PIPESTATUS[0]}

let individual_loop_count=${individual_loop_count}+1

if [[ ${stop_on_failure} -gt 0 ]]; then
if [[ ${pytest_return_code} -ne 0 ]]; then
break 3
fi
fi
# stop-on-skip: grep the last 20 log lines for a pytest "skip" marker.
if [[ ${stop_on_skip} -gt 0 ]]; then
search_result=`tail -20 ${ITGRUNNER_LOG_FILE} | grep -i skip`
#echo "skip search result is ${search_result}"
if [[ ${search_result} != "" ]]; then
break 3
fi
fi
done
fi
fi
let test_index=${test_index}+1
done

let full_set_loop_count=${full_set_loop_count}+1
done

# print out summary information
# (the egrep pulls the pytest result lines and the "Running <test>" markers
# back out of the accumulated log file to form a compact pass/fail recap)
echo "" | CaptureOutput ${ITGRUNNER_LOG_FILE}
echo "" | CaptureOutput ${ITGRUNNER_LOG_FILE}
echo "+++++++++++++++++++++++++++++++++++++++++++++++++" | CaptureOutput ${ITGRUNNER_LOG_FILE}
echo "++++++++++++++++++++ SUMMARY ++++++++++++++++++++" | CaptureOutput ${ITGRUNNER_LOG_FILE}
echo "+++++++++++++++++++++++++++++++++++++++++++++++++" | CaptureOutput ${ITGRUNNER_LOG_FILE}
echo "" | CaptureOutput ${ITGRUNNER_LOG_FILE}
date | CaptureOutput ${ITGRUNNER_LOG_FILE}
echo "Log file is: ${ITGRUNNER_LOG_FILE}" | CaptureOutput ${ITGRUNNER_LOG_FILE}
echo "" | CaptureOutput ${ITGRUNNER_LOG_FILE}
egrep $'=====|\u2B95' ${ITGRUNNER_LOG_FILE} | egrep ' in |Running' | CaptureOutput ${ITGRUNNER_LOG_FILE}

# check again if the numad daemon is running
numad_grep_output=`ps -ef | grep numad | grep -v grep`
if [[ "${numad_grep_output}" != "" ]]; then
echo "" | CaptureOutput ${ITGRUNNER_LOG_FILE}
echo "********************************************************************************" | CaptureOutput ${ITGRUNNER_LOG_FILE}
echo "*** WARNING: 'numad' appears to be running on this computer!" | CaptureOutput ${ITGRUNNER_LOG_FILE}
echo "*** 'ps' output: ${numad_grep_output}" | CaptureOutput ${ITGRUNNER_LOG_FILE}
echo "*** This daemon can adversely affect the running of these tests, especially ones" | CaptureOutput ${ITGRUNNER_LOG_FILE}
echo "*** that are resource intensive in the Readout Apps. This is because numad moves" | CaptureOutput ${ITGRUNNER_LOG_FILE}
echo "*** processes (threads?) to different cores/numa nodes periodically, and that" | CaptureOutput ${ITGRUNNER_LOG_FILE}
echo "*** context switch can disrupt the stable running of the DAQ processes." | CaptureOutput ${ITGRUNNER_LOG_FILE}
echo "********************************************************************************" | CaptureOutput ${ITGRUNNER_LOG_FILE}
fi
dunedaq_integtest_bundle.sh -r dfmodules $@