From 33ea77a120ba3bd7af5b673bd35dcd1451d255e4 Mon Sep 17 00:00:00 2001 From: Trevor Date: Wed, 24 Jun 2020 10:47:57 -0700 Subject: [PATCH 01/38] cloud test setup documentation (#495) --- cmd/build | 5 +- docs/add_test.md | 2 +- docs/cloud_tests.md | 133 ++++++++++++++++++++++++++++++++++++ docs/integration_testing.md | 83 ---------------------- docs/orchestration.md | 2 +- subset/cloud/test_udmi | 5 ++ 6 files changed, 143 insertions(+), 87 deletions(-) create mode 100644 docs/cloud_tests.md delete mode 100644 docs/integration_testing.md diff --git a/cmd/build b/cmd/build index dc428ba90d..f4f09dcf32 100755 --- a/cmd/build +++ b/cmd/build @@ -26,11 +26,12 @@ DOCKER_IMAGE_VER=docker_images.ver cd $ROOT source etc/config_base.sh -host_tests=$host_tests bin/docker_build_files +echo host_tests=$host_tests +test_targets=$host_tests bin/docker_build_files function pull_images { TAG=$1 declare -A test_set - for target in $(host_tests=$host_tests bin/docker_build_files); do + for target in $test_targets; do target=$(echo $target | sed 's|^.*/Dockerfile.||' | echo daqf/$(> local/system.conf` -- Set tests configuration. This, of course, only works for local development when using the `local_tests.conf` config. To -formalize a test and include it in the overal system build it should be included in +formalize a test and include it in the overall system build it should be included in `config/modules/all.conf`. ## Component Build diff --git a/docs/cloud_tests.md b/docs/cloud_tests.md new file mode 100644 index 0000000000..6ed8a9efed --- /dev/null +++ b/docs/cloud_tests.md @@ -0,0 +1,133 @@ +# Cloud Connection Testing + +A number of additional setup steps are required for enabling testing against "smart devices" +that communicate with the cloud. The tests themselves are part of the `subset/cloud/test_udmi` +module included in the standard DAQ distro. 
The same basic device-to-cloud validation test +pipeline can be done manually and automatically (through DAQ); it's instructive to fully +understand the manual test pipeline before engaging with the automated setup. + +## Manual Test Pipeline + +The overall device-to-cloud pipeline looks something like the following: + +* Device sends data to the cloud. There's two kinds of devices: + * A faux _reference design_ device called [pubber](pubber.md), which is a completely contained + software device. + * An actual physical device. The setup and configuration of that device will be manufacturer + dependent and so is out of scope for this (DAQ) documentation. +* A configured GCP IoT Core project, registry, and device entry. The +[GCP docs for IoT Core](https://cloud.google.com/iot/docs/how-tos/devices) describe the basics. The +key part is the _authentication key_ (hahaha) that needs to be setup between the local device and +cloud device entry. +* The IoT Core registry is configured with a _PubSub topic_ (not to be confused with an _MQTT topic_), +that provides the bridge between incoming data and consumers of that data. See the GCP documentation +on PubSub for more details. +* (optional) The `gcloud` command line can be used to validate that data is being sent from the +device to the cloud. Something like +`gcloud pubsub subscriptions pull --auto-ack projects/{project}/subscriptions/{sub_id}`. +(Complete documentation for how to use `gcloud` commands is out of scope of this documentation.) +* The [validator tool](validator.md) is what programmatically validates a device data stream, and +is what is ultimately used by `test_udmi` to validate device-cloud communication. + +## Base Local Test Setup + +* The `udmi` module needs to be enabled in build. When running `cmd/build` there should be a line +like `subset/cloud/Dockerfile.test_udmi` in the startup logs. 
+This is enabled through the `host_tests` config parameter, +which can be set to `config/modules/all.conf` if necessary. On startup, there should be a log +message that includes `udmi`: +``` +Jun 22 08:32:52 runner INFO Configured with tests pass, fail, ping, bacnet, mudgee, nmap, discover, switch, macoui, bacext, tls, password, udmi, manual +``` +* A testing gcp service account `gcp_cred` needs to be setup as described in +[service account setup instructions](service.md). +* The system's default `module_config` needs to enable the `udmi` test, e.g. as per +`resources/setups/baseline/module_config.json`. This can be validated by (runtime) checking +`inst/run-port-01/nodes/udmi01/tmp/module_config.json` to see if it has something like the following: +``` + "udmi": { + "enabled": true + } +``` +* `site_path` config needs to point to a site definition directory, or defaults to `local/site`. +This contains all the site-specific information about devices needed for testing. +* `{site_path}/mac_addrs/{mac_addr}/module_config.json` needs to have a `device_id` defined, e.g. +as in `resources/test_site/mac_addrs/3c5ab41e8f0b/module_config.json`. +* The GCP IoT Core setup needs to have a proper registry and device configred. This can either +be done manually or using the [registrar tool](registrar.md) tool. + +## Integration Testing + +If developing cloud-tests, then the CI build system also needs to have a service account configured +pointing at a suitable GCP project. To run cloud-based tests, setup the Travis `GCP_BASE64_CRED` +env variable with a `base64` encoded service account key for your project. It's recommended to +use a dedicated key with a nice name like `daq-travis`, but not required. Encode the key value +as per below, and cut/paste the resulting string into a +[Travis environment variable](https://docs.travis-ci.com/user/environment-variables/#defining-variables-in-repository-settings) +for a `GCP_BASE64_CRED` variable. 
Note the `-w 0` option is required for proper parsing/formatting, +as there can't be any newlines in the copied string. + + +$ base64 -w 0 local/gcp_service_account.json +ewoICJ1eXBlIjogInNlcnZpY2VfYWNjb3VudCIsCiAgInByb2plY3RfaWQiOiAiYm9zLWRhcS10ZXN0aW5nIiwKICAicHJpd +… +iOiAiaHR0cHM6Ly93LWRhcS10ZXN0aW5nLmlhbS5nc2VydmljZWFjY291bnQuY29tIgp9Cg== + + +### Travis CI Testing + +* Run the [registrar tool](registrar.md) to properly configure the cloud project. +* `gcp_topic` config to `local/system.conf` as described in this doc. +* Configure test subsystem with proper cloud endpoint in `{test_site}/cloud_iot_config.json`. +* Configure the DUT with the proper cloud device credentials (device specific). For _faux_ devices, this means copying +the associated `rsa_private.pkcs8` file to something like `inst/faux/daq-faux-2/local/` (exact path depends on which faux). +* Test with `bin/registrar`, `pubber/bin/run`, and `bin/validate` manually, before integrated testing through DAQ. + +### Is my Travis set up correctly? + +If Travis is set up correctly, you should see messages at the beginning of the log file: +``` +Setting environment variables from repository settings +$ export DOCKER_USERNAME=[secure] +$ export DOCKER_PASSWORD=[secure] +$ export GCP_BASE64_CRED=[secure] +``` + +Further down there would be more details about the cred itself: +``` +Running test script testing/test_aux.sh +Writing test results to inst/test_aux.out and inst/test_aux.gcp +Decoding GCP_BASE64_CRED to inst/config/gcp_service_account.json +base64 wc: 1 1 3097 +GCP service account is "daq-travis@daq-testing.iam.gserviceaccount.com" +``` + +If the `3097` character count is wildly off, then likely something went wrong with the newlines. + +### Travis Build For "External" Pull Requests + +Travis will not use encrypted environment variables when testing against pull requests +from foreign github repositories, even if you've forked from another repository that you +have full control of via Github. 
Travis authorization != Github authorization, even if +you sign into Travis using Github! This is as it should be b/c security. see the following +for more info: + +- https://docs.travis-ci.com/user/environment-variables/#defining-variables-in-repository-settings +- https://docs.travis-ci.com/user/pull-requests/#pull-requests-and-security-restrictions + +If your test is failing from a PR, you'll see something like in a similar log location: + +``` +Encrypted environment variables have been removed for security reasons. +See https://docs.travis-ci.com/user/pull-requests/#pull-requests-and-security-restrictions +Setting environment variables from .travis.yml +$ export DOCKER_STARTUP_TIMEOUT_MS=60000 +$ export DAQ_TEST=aux +``` + +### Other Travis Caveats + +Take note the URL in your browser's address bar when running Travis. You might be on either +travis-ci.com or travis-ci.org. Any particular setup +may end up across both sites for undetermined reasons. Please consult with your browser's +exact URL for more clarity. diff --git a/docs/integration_testing.md b/docs/integration_testing.md deleted file mode 100644 index 1c6435a9b0..0000000000 --- a/docs/integration_testing.md +++ /dev/null @@ -1,83 +0,0 @@ -# Integration Testing - -DAQ currently uses Travis CI for integration testing: https://travis-ci.org/ - -## Configuration - -The `test_udmi` test module uses the Registrar and Validator to check that a device is -properly communicating through Cloud IoT, automated through DAQ. - -### GCP Credential - -To run cloud-based tests, setup the Travis `GCP_BASE64_CRED` env variable with a `base64` encoded -service account key for your project. It's recommended to use a dedicated key with a nice name -like `daq-travis`, but not required. Encode the key value as per below, and cut/paste the -resulting string into a -[Travis environment variable](https://docs.travis-ci.com/user/environment-variables/#defining-variables-in-repository-settings) -for a `GCP_BASE64_CRED` varaible. 
Note the `-w 0` option is required for proper parsing/formatting, -as there can't be any newlines in the copied string. - - -$ base64 -w 0 local/gcp_service_account.json -ewoICJ1eXBlIjogInNlcnZpY2VfYWNjb3VudCIsCiAgInByb2plY3RfaWQiOiAiYm9zLWRhcS10ZXN0aW5nIiwKICAicHJpd -… -iOiAiaHR0cHM6Ly93LWRhcS10ZXN0aW5nLmlhbS5nc2VydmljZWFjY291bnQuY29tIgp9Cg== - - -## Travis CI Testing - -* Run the [registrar tool](registrar.md) to properly configure the cloud project. -* `gcp_topic` config to `local/system.conf` as described in this doc. -* Configure test subsystem with proper cloud endpoint in `{test_site}/cloud_iot_config.json`. -* Configure the DUT with the proper cloud device credentials (device specific). For _faux_ devices, this means copying -the assocatied `rsa_private.pkcs8` file to someting like `inst/faux/daq-faux-2/local/` (exact path depends on which faux). -* Test with `bin/registrar`, `pubber/bin/run`, and `bin/validate` manually, before integrated testing through DAQ. - -### Is my Travis set up correctly? - -If Travis is set up correctly, you should see messages at the beginning of the log file: -``` -Setting environment variables from repository settings -$ export DOCKER_USERNAME=[secure] -$ export DOCKER_PASSWORD=[secure] -$ export GCP_BASE64_CRED=[secure] -``` - -Further down there would be more details about the cred itself: -``` -Running test script testing/test_aux.sh -Writing test results to inst/test_aux.out and inst/test_aux.gcp -Decoding GCP_BASE64_CRED to inst/config/gcp_service_account.json -base64 wc: 1 1 3097 -GCP service account is "daq-travis@daq-testing.iam.gserviceaccount.com" -``` - -If the `3097` character count is wildly off, then likely something went wrong with the newlines. - -### Travis Build For "External" Pull Requests - -Travis will not use encrypted environment variables when testing against pull requests -from foreign github repositories, even if you've forked from another repository that you -have full control of via Github. 
Travis authorization != Github authorization, even if -you sign into Travis using Github! This is as it should be b/c security. see the following -for more info: - -- https://docs.travis-ci.com/user/environment-variables/#defining-variables-in-repository-settings -- https://docs.travis-ci.com/user/pull-requests/#pull-requests-and-security-restrictions - -If your test is failing from a PR, you'll see something like in a similar log location: - -``` -Encrypted environment variables have been removed for security reasons. -See https://docs.travis-ci.com/user/pull-requests/#pull-requests-and-security-restrictions -Setting environment variables from .travis.yml -$ export DOCKER_STARTUP_TIMEOUT_MS=60000 -$ export DAQ_TEST=aux -``` - -### Other Travis Caveats - -Take note the URL in your browser's address bar when running Travis. You might be on either -travis-ci.com or travis-ci.org. Any particular setup -may end up across both sites for undertermined reasons. Please consult with your browser's -exact URL for more clarity. diff --git a/docs/orchestration.md b/docs/orchestration.md index 4804ec356f..703c5235a0 100644 --- a/docs/orchestration.md +++ b/docs/orchestration.md @@ -11,7 +11,7 @@ to change. ## Data Rouces -The overal orchestration capability relies on several simple data sources: +The overall orchestration capability relies on several simple data sources: 1. [Overall network topology](topologies.md), which indicates how the network hardware is configured. 2. 
[Device MUD files](../mud_files), which provide an [IETF Standard MUD descriptor](https://datatracker.ietf.org/doc/draft-ietf-opsawg-mud/) that describes diff --git a/subset/cloud/test_udmi b/subset/cloud/test_udmi index 4b0cfb2a32..fde175a26d 100755 --- a/subset/cloud/test_udmi +++ b/subset/cloud/test_udmi @@ -1,4 +1,5 @@ #!/bin/bash -e + source reporting.sh REPORT=/tmp/report.txt @@ -87,3 +88,7 @@ function message_report { for message_type in $message_types; do message_report $message_type done + +fgrep RESULT $REPORT + +echo Done with test_udmi From 048bf2059b27bb8fe6e4b8579d6ee44feee8bedf Mon Sep 17 00:00:00 2001 From: Haoli Du Date: Thu, 25 Jun 2020 00:09:30 +0000 Subject: [PATCH 02/38] 1.6.0 release --- docs/changelog.md | 6 ++++++ etc/docker_images.txt | 47 ++++++++++++++++++++++--------------------- etc/docker_images.ver | 2 +- 3 files changed, 31 insertions(+), 24 deletions(-) diff --git a/docs/changelog.md b/docs/changelog.md index a67a7cfd32..98dbfa0745 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -1,4 +1,10 @@ # Changelog +* 1.6.0 + * cloud test setup documentation (#495) + * Baseline for NTP tests (#494) + * Baseline for DNS test (#492) + * Add manual test summary to test report (#481) + * UDMI logentry schema update (#391) * 1.5.1 * Fix for local-port-as-string issue (#477) * 1.5.0 diff --git a/etc/docker_images.txt b/etc/docker_images.txt index 193e733424..4fd1fc4f47 100644 --- a/etc/docker_images.txt +++ b/etc/docker_images.txt @@ -1,23 +1,24 @@ -daqf/aardvark 34718b2f3fd5 -daqf/default 3ac95db36ee4 -daqf/faucet 45c13344a8ed -daqf/faux1 ecff07f12534 -daqf/faux2 39914ae11741 -daqf/gauge 1431053cf25e -daqf/networking af56b0732100 -daqf/switch 67954aca8dce -daqf/test_bacext 363b6d476ac8 -daqf/test_bacnet 073a0eb5529f -daqf/test_brute 700d986d5e83 -daqf/test_discover ad34b17b41e6 -daqf/test_fail c9a7e6b43bd0 -daqf/test_hold cb120980c658 -daqf/test_macoui a828288c855b -daqf/test_mudgee d4ed15ef1dfc -daqf/test_nmap 78aa5def41e5 
-daqf/test_pass 74167ef0df55 -daqf/test_password 471bd1290918 -daqf/test_ping 5618e0243643 -daqf/test_switch 47585fc0876e -daqf/test_tls 9c5f28b74fed -daqf/test_udmi fc13d4c80b0d +daqf/aardvark a16be44f9e25 +daqf/default 0c3cdaf1ea29 +daqf/faucet 6d604669ca66 +daqf/faux1 4db039df8e23 +daqf/faux2 25a7fafb7e45 +daqf/gauge 765578acddba +daqf/networking bfcc9246b0ab +daqf/switch ae42486d4cbd +daqf/test_bacext c7b8e3edf77a +daqf/test_bacnet a72e6e252ba9 +daqf/test_brute 2bf056e2a552 +daqf/test_discover aabd2d3d70f6 +daqf/test_fail 68c70b8e1067 +daqf/test_hold 1a488be2c5ad +daqf/test_macoui b86a88ebc6b5 +daqf/test_manual 6d8f9eb5b231 +daqf/test_mudgee c5feb2e63c82 +daqf/test_nmap 379f01b87c8b +daqf/test_pass 4f4098eeb114 +daqf/test_password 98c312e8415b +daqf/test_ping c86303a534d5 +daqf/test_switch fa40f5010865 +daqf/test_tls 748c578eb01d +daqf/test_udmi b0eb4d8d6cd8 diff --git a/etc/docker_images.ver b/etc/docker_images.ver index 26ca594609..dc1e644a10 100644 --- a/etc/docker_images.ver +++ b/etc/docker_images.ver @@ -1 +1 @@ -1.5.1 +1.6.0 From b2447092d207b4552c4f391bbb37eaa92284a66a Mon Sep 17 00:00:00 2001 From: henry54809 Date: Thu, 25 Jun 2020 08:32:12 -0700 Subject: [PATCH 03/38] fix image pull in cmd/build (#503) --- cmd/build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/build b/cmd/build index f4f09dcf32..834b1078a5 100755 --- a/cmd/build +++ b/cmd/build @@ -27,7 +27,7 @@ DOCKER_IMAGE_VER=docker_images.ver cd $ROOT source etc/config_base.sh echo host_tests=$host_tests -test_targets=$host_tests bin/docker_build_files +test_targets=$(host_tests=$host_tests bin/docker_build_files) function pull_images { TAG=$1 declare -A test_set From 588189c5a12c1d93fcc6b67a470071e4c4c087f4 Mon Sep 17 00:00:00 2001 From: Haoli Du Date: Thu, 25 Jun 2020 16:02:53 +0000 Subject: [PATCH 04/38] 1.6.1 release --- docs/changelog.md | 2 ++ etc/docker_images.txt | 48 +++++++++++++++++++++---------------------- etc/docker_images.ver | 2 +- 3 files changed, 27 
insertions(+), 25 deletions(-) diff --git a/docs/changelog.md b/docs/changelog.md index 98dbfa0745..c27652fa90 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -1,4 +1,6 @@ # Changelog +* 1.6.1 + * fix image pull in cmd/build (#503) * 1.6.0 * cloud test setup documentation (#495) * Baseline for NTP tests (#494) diff --git a/etc/docker_images.txt b/etc/docker_images.txt index 4fd1fc4f47..6260cbd020 100644 --- a/etc/docker_images.txt +++ b/etc/docker_images.txt @@ -1,24 +1,24 @@ -daqf/aardvark a16be44f9e25 -daqf/default 0c3cdaf1ea29 -daqf/faucet 6d604669ca66 -daqf/faux1 4db039df8e23 -daqf/faux2 25a7fafb7e45 -daqf/gauge 765578acddba -daqf/networking bfcc9246b0ab -daqf/switch ae42486d4cbd -daqf/test_bacext c7b8e3edf77a -daqf/test_bacnet a72e6e252ba9 -daqf/test_brute 2bf056e2a552 -daqf/test_discover aabd2d3d70f6 -daqf/test_fail 68c70b8e1067 -daqf/test_hold 1a488be2c5ad -daqf/test_macoui b86a88ebc6b5 -daqf/test_manual 6d8f9eb5b231 -daqf/test_mudgee c5feb2e63c82 -daqf/test_nmap 379f01b87c8b -daqf/test_pass 4f4098eeb114 -daqf/test_password 98c312e8415b -daqf/test_ping c86303a534d5 -daqf/test_switch fa40f5010865 -daqf/test_tls 748c578eb01d -daqf/test_udmi b0eb4d8d6cd8 +daqf/aardvark df091e4d5825 +daqf/default d2544deaa6e1 +daqf/faucet b5fef9b579ff +daqf/faux1 221259034e61 +daqf/faux2 e6beb911f3eb +daqf/gauge 9cd676c425de +daqf/networking 410cc21c55fa +daqf/switch 2229d5b7071c +daqf/test_bacext ddfb25affc3a +daqf/test_bacnet c8aa2fc90b87 +daqf/test_brute 09e833bf46f0 +daqf/test_discover 6126fe95e495 +daqf/test_fail 4c4df8d524fa +daqf/test_hold 4a8f5ab032be +daqf/test_macoui 5925b633d2f5 +daqf/test_manual 10ad9b86ec3e +daqf/test_mudgee 370369e124a8 +daqf/test_nmap 393fd39e9b0f +daqf/test_pass 0f8e341c292b +daqf/test_password 5be042269e32 +daqf/test_ping 486bb80e9dd6 +daqf/test_switch 082159ca2f27 +daqf/test_tls d2320f042174 +daqf/test_udmi 07efea3d4641 diff --git a/etc/docker_images.ver b/etc/docker_images.ver index dc1e644a10..9c6d6293b1 100644 --- 
a/etc/docker_images.ver +++ b/etc/docker_images.ver @@ -1 +1 @@ -1.6.0 +1.6.1 From ab7eed7537e7737dc07aa2ddac65c7a7870d8aea Mon Sep 17 00:00:00 2001 From: henry54809 Date: Fri, 26 Jun 2020 07:04:54 -0700 Subject: [PATCH 05/38] USI (#496) --- usi/.gitignore | 3 + usi/Dockerfile.usi | 12 + usi/pom.xml | 146 ++++++++ .../main/java/daq/usi/ResponseHandler.java | 5 + .../main/java/daq/usi/SwitchController.java | 201 +++++++++++ .../daq/usi/SwitchTelnetClientSocket.java | 329 ++++++++++++++++++ usi/src/main/java/daq/usi/UsiImpl.java | 92 +++++ usi/src/main/java/daq/usi/UsiServer.java | 59 ++++ .../daq/usi/allied/AlliedTelesisX230.java | 292 ++++++++++++++++ .../main/java/daq/usi/cisco/Cisco9300.java | 289 +++++++++++++++ usi/src/main/proto/usi.proto | 79 +++++ .../java/daq/usi/SwitchControllerTest.java | 47 +++ usi/start | 2 + 13 files changed, 1556 insertions(+) create mode 100644 usi/.gitignore create mode 100644 usi/Dockerfile.usi create mode 100644 usi/pom.xml create mode 100644 usi/src/main/java/daq/usi/ResponseHandler.java create mode 100644 usi/src/main/java/daq/usi/SwitchController.java create mode 100644 usi/src/main/java/daq/usi/SwitchTelnetClientSocket.java create mode 100644 usi/src/main/java/daq/usi/UsiImpl.java create mode 100644 usi/src/main/java/daq/usi/UsiServer.java create mode 100644 usi/src/main/java/daq/usi/allied/AlliedTelesisX230.java create mode 100644 usi/src/main/java/daq/usi/cisco/Cisco9300.java create mode 100644 usi/src/main/proto/usi.proto create mode 100644 usi/src/test/java/daq/usi/SwitchControllerTest.java create mode 100755 usi/start diff --git a/usi/.gitignore b/usi/.gitignore new file mode 100644 index 0000000000..4b12f8ab84 --- /dev/null +++ b/usi/.gitignore @@ -0,0 +1,3 @@ +tmp/* +target/* +.idea/* diff --git a/usi/Dockerfile.usi b/usi/Dockerfile.usi new file mode 100644 index 0000000000..ba63658021 --- /dev/null +++ b/usi/Dockerfile.usi @@ -0,0 +1,12 @@ +FROM daqf/aardvark:latest + +# Do this alone first so it can be re-used by 
other build files. +RUN $AG update && $AG install openjdk-9-jre + +RUN $AG update && $AG install openjdk-9-jdk git + +COPY usi/ usi/ + +RUN cd usi && mvn clean compile assembly:single + +CMD ["./usi/start"] diff --git a/usi/pom.xml b/usi/pom.xml new file mode 100644 index 0000000000..7d87e5c53d --- /dev/null +++ b/usi/pom.xml @@ -0,0 +1,146 @@ + + 4.0.0 + com.redstone + usi + 0.0.1 +jar + usi + + UTF-8 + 1.8 + 1.8 + + + + + + io.grpc + grpc-bom + 1.30.0 + pom + import + + + + + + + junit + junit + 4.13 + test + + + commons-net + commons-net + 3.6 + + + io.grpc + grpc-netty-shaded + 1.30.0 + + + io.grpc + grpc-protobuf + 1.30.0 + + + io.grpc + grpc-stub + 1.30.0 + + + org.apache.tomcat + annotations-api + 6.0.53 + provided + + + org.junit.jupiter + junit-jupiter + RELEASE + compile + + + + + + kr.motd.maven + os-maven-plugin + 1.6.2 + + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + 0.6.1 + + com.google.protobuf:protoc:3.12.0:exe:${os.detected.classifier} + grpc-java + io.grpc:protoc-gen-grpc-java:1.30.0:exe:${os.detected.classifier} + + ${basedir}/src/main/proto + + + + + + compile + compile-custom + + + + + + maven-assembly-plugin + + + + daq.usi.UsiServer + + + + jar-with-dependencies + + + + + maven-clean-plugin + 3.1.0 + + + + maven-resources-plugin + 3.1.0 + + + maven-surefire-plugin + 2.22.2 + + + maven-jar-plugin + 3.2.0 + + + maven-install-plugin + 2.5.2 + + + maven-deploy-plugin + 2.8.2 + + + org.apache.maven.plugins + maven-compiler-plugin + 3.8.1 + + 9 + 9 + + + + + \ No newline at end of file diff --git a/usi/src/main/java/daq/usi/ResponseHandler.java b/usi/src/main/java/daq/usi/ResponseHandler.java new file mode 100644 index 0000000000..4fd96af577 --- /dev/null +++ b/usi/src/main/java/daq/usi/ResponseHandler.java @@ -0,0 +1,5 @@ +package daq.usi; + +public interface ResponseHandler { + void receiveData(T data) throws Exception; +} diff --git a/usi/src/main/java/daq/usi/SwitchController.java 
b/usi/src/main/java/daq/usi/SwitchController.java new file mode 100644 index 0000000000..f9325c9ec0 --- /dev/null +++ b/usi/src/main/java/daq/usi/SwitchController.java @@ -0,0 +1,201 @@ +package daq.usi; + +/* + * Licensed to Google under one or more contributor license agreements. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import grpc.Interface; +import grpc.Power; +import grpc.SwitchActionResponse; +import java.util.HashMap; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + + +public abstract class SwitchController implements Runnable { + /** + * Terminal Prompt ends with '#' when enabled, '>' when not enabled. 
+ */ + public static final String CONSOLE_PROMPT_ENDING_ENABLED = "#"; + public static final String CONSOLE_PROMPT_ENDING_LOGIN = ">"; + + // Define Common Variables Required for All Switch Interrogators + protected SwitchTelnetClientSocket telnetClientSocket; + protected Thread telnetClientSocketThread; + protected String remoteIpAddress; + protected int telnetPort; + protected boolean debug; + protected String username; + protected String password; + protected boolean userAuthorised = false; + protected boolean userEnabled = false; + protected String hostname = null; + protected boolean commandPending = false; + + public SwitchController(String remoteIpAddress, int telnetPort, String username, + String password) { + this(remoteIpAddress, telnetPort, username, password, false); + } + + /** + * Abstract Switch controller. Override this class for switch specific implementation + * + * @param remoteIpAddress switch ip address + * @param telnetPort switch telnet port + * @param username switch username + * @param password switch password + * @param debug for verbose logging + */ + public SwitchController( + String remoteIpAddress, int telnetPort, String username, String password, boolean debug) { + this.remoteIpAddress = remoteIpAddress; + this.telnetPort = telnetPort; + this.username = username; + this.password = password; + this.debug = debug; + telnetClientSocket = + new SwitchTelnetClientSocket(remoteIpAddress, telnetPort, this, debug); + } + + protected boolean containsPrompt(String consoleData) { + // Prompts usually hostname# or hostname(config)# + Pattern r = Pattern.compile(hostname + "\\s*(\\(.+\\))?" + CONSOLE_PROMPT_ENDING_ENABLED, 'g'); + Matcher m = r.matcher(consoleData); + return m.find(); + } + + protected boolean promptReady(String consoleData) { + // Prompts usually hostname# or hostname(config)# + Pattern r = Pattern.compile(hostname + "\\s*(\\(.+\\))?" 
+ CONSOLE_PROMPT_ENDING_ENABLED + "$"); + Matcher m = r.matcher(consoleData); + return m.find(); + } + + /** + * Receive the raw data packet from the telnet connection and process accordingly. + * + * @param consoleData Most recent data read from the telnet socket buffer + */ + public void receiveData(String consoleData) { + if (debug) { + System.out.println( + java.time.LocalTime.now() + " receivedData:\t" + consoleData); + } + if (consoleData != null) { + try { + consoleData = consoleData.trim(); + if (!userAuthorised) { + handleLoginMessage(consoleData); + } else if (!userEnabled) { + handleEnableMessage(consoleData); + } else { + parseData(consoleData); + } + } catch (Exception e) { + telnetClientSocket.disposeConnection(); + e.printStackTrace(); + } + } + } + + /** + * Map a simple table containing a header and 1 row of data to a hashmap + * This method will also attempt to correct for mis-aligned tabular data as well as empty + * columns values. + * + * @param rawPacket Raw table response from a switch command + * @param colNames Array containing the names of the columns in the response + * @param mapNames Array containing names key names to map values to + * @return A HashMap containing the values mapped to the key names provided in the mapNames array + */ + protected static HashMap mapSimpleTable( + String rawPacket, String[] colNames, String[] mapNames) { + HashMap colMap = new HashMap<>(); + String[] lines = rawPacket.split("\n"); + if (lines.length > 0) { + String header = lines[0].trim(); + String values = lines[1].trim(); + int lastSectionEnd = 0; + for (int i = 0; i < colNames.length; ++i) { + int secStart = lastSectionEnd; + int secEnd; + if ((i + 1) >= colNames.length) { + // Resolving last column + secEnd = values.length(); + } else { + // Tabular data is not always reported in perfectly alignment, we need to calculate the + // correct values based off of the sections in between white spaces + int firstWhiteSpace = + 
getFirstWhiteSpace(values.substring(lastSectionEnd)) + lastSectionEnd; + int lastWhiteSpace = + getIndexOfNonWhitespaceAfterWhitespace(values.substring(firstWhiteSpace)) + + firstWhiteSpace; + int nextHeaderStart = header.indexOf(colNames[i + 1]); + secEnd = Math.min(lastWhiteSpace, nextHeaderStart); + } + lastSectionEnd = secEnd; + String sectionRaw = values.substring(secStart, secEnd).trim(); + colMap.put(mapNames[i], sectionRaw); + } + } + return colMap; + } + + + private static int getFirstWhiteSpace(String string) { + char[] characters = string.toCharArray(); + for (int i = 0; i < string.length(); i++) { + if (Character.isWhitespace(characters[i])) { + return i; + } + } + return -1; + } + + private static int getIndexOfNonWhitespaceAfterWhitespace(String string) { + char[] characters = string.toCharArray(); + boolean lastWhitespace = false; + for (int i = 0; i < string.length(); i++) { + if (Character.isWhitespace(characters[i])) { + lastWhitespace = true; + } else if (lastWhitespace) { + return i; + } + } + return -1; + } + + protected abstract void parseData(String consoleData) throws Exception; + + protected abstract void handleLoginMessage(String consoleData) throws Exception; + + protected abstract void handleEnableMessage(String consoleData) throws Exception; + + public abstract void getPower(int devicePort, ResponseHandler handler) throws Exception; + + public abstract void getInterface(int devicePort, ResponseHandler handler) + throws Exception; + + public abstract void connect(int devicePort, ResponseHandler handler) + throws Exception; + + public abstract void disconnect(int devicePort, ResponseHandler handler) + throws Exception; + + @Override + public void run() { + telnetClientSocketThread = new Thread(telnetClientSocket); + telnetClientSocketThread.start(); + } +} diff --git a/usi/src/main/java/daq/usi/SwitchTelnetClientSocket.java b/usi/src/main/java/daq/usi/SwitchTelnetClientSocket.java new file mode 100644 index 0000000000..7a73380612 --- 
/dev/null +++ b/usi/src/main/java/daq/usi/SwitchTelnetClientSocket.java @@ -0,0 +1,329 @@ +package daq.usi; + +/* + * Licensed to Google under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; +import org.apache.commons.net.telnet.EchoOptionHandler; +import org.apache.commons.net.telnet.InvalidTelnetOptionException; +import org.apache.commons.net.telnet.SuppressGAOptionHandler; +import org.apache.commons.net.telnet.TelnetClient; +import org.apache.commons.net.telnet.TelnetNotificationHandler; +import org.apache.commons.net.telnet.TerminalTypeOptionHandler; + +public class SwitchTelnetClientSocket implements TelnetNotificationHandler, Runnable { + public static String MORE_INDICATOR = "--More--"; + + protected static final int SLEEP_MS = 100; + // Rx empty space timeout before sending \n + protected static final int MAX_EMPTY_WAIT_COUNT = 70; + + protected TelnetClient telnetClient; + protected SwitchController interrogator; + + protected String remoteIpAddress = ""; + protected int remotePort = 23; + + protected InputStream inputStream; + protected OutputStream outputStream; + + 
protected Queue rxQueue = new LinkedList<>(); + + protected Thread readerThread; + protected Thread gatherThread; + + protected boolean debug; + + /** + * Telnet Client. + * @param remoteIpAddress switch ip address + * @param remotePort telent port + * @param interrogator switch specific switch controller + * @param debug For more verbose output. + */ + public SwitchTelnetClientSocket( + String remoteIpAddress, int remotePort, SwitchController interrogator, boolean debug) { + this.remoteIpAddress = remoteIpAddress; + this.remotePort = remotePort; + this.interrogator = interrogator; + this.debug = debug; + telnetClient = new TelnetClient(); + addOptionHandlers(); + } + + protected void connectTelnetSocket() { + int attempts = 0; + + while (!telnetClient.isConnected() && attempts < 10) { + try { + telnetClient.connect(remoteIpAddress, remotePort); + } catch (IOException e) { + System.err.println("Exception while connecting:" + e.getMessage()); + } + + attempts++; + + try { + Thread.sleep(SLEEP_MS); + } catch (InterruptedException e) { + System.err.println("Exception while connecting:" + e.getMessage()); + } + } + } + + @Override + public void run() { + connectTelnetSocket(); + + Runnable readDataRunnable = + () -> { + readData(); + }; + readerThread = new Thread(readDataRunnable); + + readerThread.start(); + + Runnable gatherDataRunnable = + () -> { + gatherData(); + }; + gatherThread = new Thread(gatherDataRunnable); + + gatherThread.start(); + + outputStream = telnetClient.getOutputStream(); + } + + protected void gatherData() { + StringBuilder rxData = new StringBuilder(); + + int rxQueueCount = 0; + + while (telnetClient.isConnected()) { + try { + if (rxQueue.isEmpty()) { + Thread.sleep(SLEEP_MS); + rxQueueCount++; + if (!interrogator.commandPending && rxQueueCount > MAX_EMPTY_WAIT_COUNT) { + if (debug) { + System.out.println("rxQueue Empty. 
Sending new line."); + } + rxQueueCount = 0; + writeData("\n"); + } + continue; + } + rxQueueCount = 0; + while (rxQueue.peek().trim() == "") { + rxQueue.poll(); + } + String rxTemp = rxQueue.poll(); + if (rxTemp.indexOf(MORE_INDICATOR) > 0) { + writeData("\n"); + if (debug) { + System.out.println("more position:" + rxTemp.indexOf(MORE_INDICATOR)); + System.out.println("Data: " + rxTemp); + } + rxTemp = rxTemp.replace(MORE_INDICATOR, ""); + rxData.append(rxTemp); + } else if (interrogator.userAuthorised + && !interrogator.promptReady((rxData.toString() + rxTemp).trim())) { + rxData.append(rxTemp); + if (debug) { + System.out.println("Waiting for more data till prompt ready: "); + System.out.println(rxData.toString().trim()); + } + } else { + rxQueueCount = 0; + rxData.append(rxTemp); + String rxGathered = rxData.toString().trim(); + rxData = new StringBuilder(); + interrogator.receiveData(rxGathered); + } + } catch (InterruptedException e) { + System.err.println("InterruptedException gatherData:" + e.getMessage()); + } + } + } + + /** + * * Callback method called when TelnetClient receives an option negotiation command. 
+ * + * @param negotiationCode - type of negotiation command received (RECEIVED_DO, RECEIVED_DONT, + * RECEIVED_WILL, RECEIVED_WONT, RECEIVED_COMMAND) + * @param optionCode - code of the option negotiated * + */ + public void receivedNegotiation(int negotiationCode, int optionCode) { + String command = null; + switch (negotiationCode) { + case TelnetNotificationHandler.RECEIVED_DO: + command = "DO"; + break; + case TelnetNotificationHandler.RECEIVED_DONT: + command = "DONT"; + break; + case TelnetNotificationHandler.RECEIVED_WILL: + command = "WILL"; + break; + case TelnetNotificationHandler.RECEIVED_WONT: + command = "WONT"; + break; + case TelnetNotificationHandler.RECEIVED_COMMAND: + command = "COMMAND"; + break; + default: + command = Integer.toString(negotiationCode); // Should not happen + break; + } + System.out.println("Received " + command + " for option code " + optionCode); + } + + private void addOptionHandlers() { + TerminalTypeOptionHandler terminalTypeOptionHandler = + new TerminalTypeOptionHandler("VT100", false, false, true, false); + + EchoOptionHandler echoOptionHandler = new EchoOptionHandler(false, false, false, false); + + SuppressGAOptionHandler suppressGaOptionHandler = + new SuppressGAOptionHandler(true, true, true, true); + + try { + telnetClient.addOptionHandler(terminalTypeOptionHandler); + telnetClient.addOptionHandler(echoOptionHandler); + telnetClient.addOptionHandler(suppressGaOptionHandler); + } catch (InvalidTelnetOptionException e) { + System.err.println( + "Error registering option handlers InvalidTelnetOptionException: " + e.getMessage()); + } catch (IOException e) { + System.err.println("Error registering option handlers IOException: " + e.getMessage()); + } + } + + private String normalizeLineEnding(byte[] bytes, char endChar) { + List bytesBuffer = new ArrayList(); + + int countBreak = 0; + int countEsc = 0; + + for (int i = 0; i < bytes.length; i++) { + if (bytes[i] != 0) { + switch (bytes[i]) { + case 8: + // backspace \x08 
+ break; + case 10: + // newLineFeed \x0A + countBreak++; + bytesBuffer.add((byte) endChar); + break; + case 13: + // carriageReturn \x0D + countBreak++; + bytesBuffer.add((byte) endChar); + break; + case 27: + // escape \x1B + countEsc = 2; + break; + case 33: + // character:! + break; + default: + if (countEsc == 0) { + if (countBreak > 1) { + int size = bytesBuffer.size(); + for (int x = 0; x < countBreak - 1; x++) { + bytesBuffer.remove(size - 1 - x); + } + countBreak = 0; + } + bytesBuffer.add(bytes[i]); + } else { + countEsc--; + } + break; + } + } + } + + String bytesString = ""; + + for (Byte byteBuffer : bytesBuffer) { + bytesString = bytesString + (char) (byte) byteBuffer; + } + + return bytesString; + } + + protected void readData() { + int bytesRead = 0; + + inputStream = telnetClient.getInputStream(); + + while (telnetClient.isConnected()) { + try { + byte[] buffer = new byte[1024]; + + bytesRead = inputStream.read(buffer); + if (bytesRead > 0) { + String rawData = normalizeLineEnding(buffer, '\n'); + rxQueue.add(rawData); + // Useful for debugging + // rxQueue.add(new String(buffer, 0, bytesRead, StandardCharsets.UTF_8)); + } else { + try { + Thread.sleep(SLEEP_MS); + } catch (InterruptedException e) { + System.err.println("InterruptedException readData:" + e.getMessage()); + } + } + } catch (IOException e) { + System.err.println("Exception while reading socket:" + e.getMessage()); + } + } + } + + public void writeData(String data) { + writeOutputStream(data); + } + + private void writeOutputStream(String data) { + try { + outputStream.write(data.getBytes()); + outputStream.flush(); + } catch (IOException e) { + System.err.println("Exception while writing socket:" + e.getMessage()); + } + } + + /** + * Closes telnet connection. 
+ */ + public void disposeConnection() { + try { + telnetClient.disconnect(); + } catch (IOException e) { + System.err.println("Exception while disposeConnection:" + e.getMessage()); + } + } +} diff --git a/usi/src/main/java/daq/usi/UsiImpl.java b/usi/src/main/java/daq/usi/UsiImpl.java new file mode 100644 index 0000000000..41dfea419f --- /dev/null +++ b/usi/src/main/java/daq/usi/UsiImpl.java @@ -0,0 +1,92 @@ +package daq.usi; + +import daq.usi.allied.AlliedTelesisX230; +import daq.usi.cisco.Cisco9300; +import grpc.Interface; +import grpc.Power; +import grpc.SwitchActionResponse; +import grpc.SwitchInfo; +import grpc.USIServiceGrpc; +import io.grpc.stub.StreamObserver; +import java.util.HashMap; +import java.util.Map; + +public class UsiImpl extends USIServiceGrpc.USIServiceImplBase { + private Map switchControllers; + + public UsiImpl() { + super(); + switchControllers = new HashMap<>(); + } + + private SwitchController getSwitchController(SwitchInfo switchInfo) { + String repr = String.join(",", switchInfo.getModel().toString(), switchInfo.getIpAddr(), + String.valueOf(switchInfo.getTelnetPort()), switchInfo.getUsername(), + switchInfo.getPassword()); + SwitchController sc = switchControllers.get(repr); + if (sc == null) { + switch (switchInfo.getModel()) { + case ALLIED_TELESIS_X230: { + sc = new AlliedTelesisX230(switchInfo.getIpAddr(), switchInfo.getTelnetPort(), + switchInfo.getUsername(), switchInfo.getPassword()); + break; + } + case CISCO_9300: { + sc = new Cisco9300(switchInfo.getIpAddr(), switchInfo.getTelnetPort(), + switchInfo.getUsername(), switchInfo.getPassword()); + break; + } + default: + break; + } + new Thread(sc).start(); + switchControllers.put(repr, sc); + } + return sc; + } + + @Override + public void getPower(SwitchInfo request, StreamObserver responseObserver) { + SwitchController sc = getSwitchController(request); + try { + sc.getPower(request.getDevicePort(), responseObserver::onNext); + } catch (Exception e) { + e.printStackTrace(); + 
responseObserver.onError(e); + } + } + + @Override + public void getInterface(SwitchInfo request, StreamObserver responseObserver) { + SwitchController sc = getSwitchController(request); + try { + sc.getInterface(request.getDevicePort(), responseObserver::onNext); + } catch (Exception e) { + e.printStackTrace(); + responseObserver.onError(e); + } + } + + @Override + public void connect(SwitchInfo request, StreamObserver responseObserver) { + SwitchController sc = getSwitchController(request); + try { + sc.connect(request.getDevicePort(), responseObserver::onNext); + } catch (Exception e) { + e.printStackTrace(); + responseObserver.onError(e); + } + } + + @Override + public void disconnect(SwitchInfo request, + StreamObserver responseObserver) { + SwitchController sc = getSwitchController(request); + try { + sc.disconnect(request.getDevicePort(), responseObserver::onNext); + } catch (Exception e) { + e.printStackTrace(); + responseObserver.onError(e); + } + } +} \ No newline at end of file diff --git a/usi/src/main/java/daq/usi/UsiServer.java b/usi/src/main/java/daq/usi/UsiServer.java new file mode 100644 index 0000000000..b5ce26374a --- /dev/null +++ b/usi/src/main/java/daq/usi/UsiServer.java @@ -0,0 +1,59 @@ +package daq.usi; + +import io.grpc.Server; +import io.grpc.ServerBuilder; +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +public class UsiServer { + private Server server; + + private void start() throws IOException { + /* The port on which the server should run */ + int port = 5000; + server = ServerBuilder.forPort(port) + .addService(new UsiImpl()) + .build() + .start(); + System.out.println("Server started, listening on " + port); + Runtime.getRuntime().addShutdownHook(new Thread() { + @Override + public void run() { + // Use stderr here since the logger may have been reset by its JVM shutdown hook. 
+ System.err.println("*** shutting down gRPC server since JVM is shutting down"); + try { + UsiServer.this.stop(); + } catch (InterruptedException e) { + e.printStackTrace(System.err); + } + System.err.println("*** server shut down"); + } + }); + } + + private void stop() throws InterruptedException { + if (server != null) { + server.shutdown().awaitTermination(30, TimeUnit.SECONDS); + } + } + + /** + * Await termination on the main thread since the grpc library uses daemon threads. + */ + private void blockUntilShutdown() throws InterruptedException { + if (server != null) { + server.awaitTermination(); + } + } + + /** + * Main method. + * @param args not used. + * @throws Exception Maybe a refactor is needed to throw more specific exceptions. + */ + public static void main(String[] args) throws Exception { + final UsiServer server = new UsiServer(); + server.start(); + server.blockUntilShutdown(); + } +} diff --git a/usi/src/main/java/daq/usi/allied/AlliedTelesisX230.java b/usi/src/main/java/daq/usi/allied/AlliedTelesisX230.java new file mode 100644 index 0000000000..f45b518542 --- /dev/null +++ b/usi/src/main/java/daq/usi/allied/AlliedTelesisX230.java @@ -0,0 +1,292 @@ +package daq.usi.allied; + +/* + * Licensed to the Google under one or more contributor license agreements. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import daq.usi.ResponseHandler; +import daq.usi.SwitchController; +import grpc.Interface; +import grpc.LinkStatus; +import grpc.POEStatus; +import grpc.POESupport; +import grpc.Power; +import grpc.SwitchActionResponse; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.Map; +import java.util.Queue; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + + +public class AlliedTelesisX230 extends SwitchController { + private static final String[] powerExpected = + {"dev_interface", "admin", "pri", "oper", "power", "device", "dev_class", "max"}; + private static final String[] showPowerExpected = + {"Interface", "Admin", "Pri", "Oper", "Power", "Device", "Class", "Max"}; + private static final Map poeStatusMap = Map.of("Powered", POEStatus.ON, + "Off", POEStatus.OFF, "Fault", POEStatus.FAULT, "Deny", POEStatus.DENY); + // TODO Not certain about AT power "Deny" status string. Can't find a device to produce that state + private static final Map poeSupportMap = Map.of("Enabled", + POESupport.ENABLED, "Disabled", POESupport.DISABLED); + private static final Map interfaceProcessMap = + Map.of(Pattern.compile("Link is (\\w+)"), "link", + Pattern.compile("current duplex (\\w+)"), "duplex", + Pattern.compile("current speed (\\w+)"), "speed"); + + private static final int WAIT_MS = 100; + private ResponseHandler responseHandler; + + /** + * ATX230 Switch Controller. + * + * @param remoteIpAddress switch ip address + * @param telnetPort switch telnet port + * @param user switch username + * @param password switch password + */ + public AlliedTelesisX230( + String remoteIpAddress, + int telnetPort, + String user, + String password) { + this(remoteIpAddress, telnetPort, user, password, false); + } + + /** + * ATX230 Switch Controller. 
+ * + * @param remoteIpAddress switch ip address + * @param telnetPort switch telnet port + * @param user switch username + * @param password switch password + * @param debug for verbose output + */ + public AlliedTelesisX230( + String remoteIpAddress, + int telnetPort, + String user, + String password, boolean debug) { + super(remoteIpAddress, telnetPort, user, password, debug); + this.username = user == null ? "manager" : user; + this.password = password == null ? "friend" : password; + } + + @Override + protected void parseData(String consoleData) throws Exception { + if (commandPending) { + responseHandler.receiveData(consoleData); + } + } + + /** + * Generic ATX230 Switch command to retrieve the Status of an interface. + */ + private String showIfaceStatusCommand(int interfacePort) { + return "show interface port1.0." + interfacePort; + } + + /** + * Generic ATX230 Switch command to retrieve the Power Status of an interface. Replace asterisk + * with actual port number for complete message. + */ + private String showIfacePowerStatusCommand(int interfacePort) { + return "show power-inline interface port1.0." + interfacePort; + } + + /** + * Port toggle commands. + * + * @param interfacePort port number + * @param enabled for bringing up/down interfacePort + * @return commands + */ + private String[] portManagementCommand(int interfacePort, boolean enabled) { + return new String[] { + "configure terminal", + "interface port1.0." + interfacePort, + (enabled ? 
"no " : "") + "shutdown", + "end" + }; + } + + + @Override + public void getPower(int devicePort, ResponseHandler handler) throws Exception { + while (commandPending) { + Thread.sleep(WAIT_MS); + } + String command = showIfacePowerStatusCommand(devicePort); + synchronized (this) { + commandPending = true; + responseHandler = data -> { + Map powerMap = processPowerStatusInline(data); + handler.receiveData(buildPowerResponse(powerMap)); + synchronized (this) { + commandPending = false; + } + }; + telnetClientSocket.writeData(command + "\n"); + } + } + + @Override + public void getInterface(int devicePort, ResponseHandler handler) throws Exception { + while (commandPending) { + Thread.sleep(WAIT_MS); + } + String command = showIfaceStatusCommand(devicePort); + synchronized (this) { + commandPending = true; + responseHandler = data -> { + Map interfaceMap = processInterfaceStatus(data); + handler.receiveData(buildInterfaceResponse(interfaceMap)); + synchronized (this) { + commandPending = false; + } + }; + telnetClientSocket.writeData(command + "\n"); + } + } + + private void managePort(int devicePort, ResponseHandler handler, + boolean enabled) throws Exception { + while (commandPending) { + Thread.sleep(WAIT_MS); + } + Queue commands = + new LinkedList<>(Arrays.asList(portManagementCommand(devicePort, enabled))); + SwitchActionResponse.Builder response = SwitchActionResponse.newBuilder(); + synchronized (this) { + commandPending = true; + responseHandler = data -> { + if (!commands.isEmpty()) { + telnetClientSocket.writeData(commands.poll() + "\n"); + return; + } + synchronized (this) { + commandPending = false; + handler.receiveData(response.setSuccess(true).build()); + } + }; + telnetClientSocket.writeData(commands.poll() + "\n"); + } + } + + @Override + public void connect(int devicePort, ResponseHandler handler) + throws Exception { + managePort(devicePort, handler, true); + } + + @Override + public void disconnect(int devicePort, ResponseHandler handler) + 
throws Exception { + managePort(devicePort, handler, false); + } + + private Interface buildInterfaceResponse(Map interfaceMap) { + Interface.Builder response = Interface.newBuilder(); + String duplex = interfaceMap.getOrDefault("duplex", ""); + int speed = 0; + try { + speed = Integer.parseInt(interfaceMap.get("speed")); + } catch (NumberFormatException e) { + System.out.println("Could not parse int: " + interfaceMap.get("speed")); + } + String linkStatus = interfaceMap.getOrDefault("link", ""); + return response.setLinkStatus(linkStatus.equals("UP") ? LinkStatus.UP : LinkStatus.DOWN) + .setDuplex(duplex) + .setLinkSpeed(speed) + .build(); + } + + private Power buildPowerResponse(Map powerMap) { + Power.Builder response = Power.newBuilder(); + float maxPower = 0; + float currentPower = 0; + try { + maxPower = Float.parseFloat(powerMap.get("max")); + currentPower = Float.parseFloat(powerMap.get("power")); + } catch (NumberFormatException e) { + System.out.println( + "Could not parse float: " + powerMap.get("max") + " or " + powerMap.get("power")); + } + String poeSupport = powerMap.getOrDefault("admin", null); + String poeStatus = powerMap.getOrDefault("oper", null); + return response.setPoeStatus(poeStatusMap.getOrDefault(poeStatus, POEStatus.OFF)) + .setPoeSupport(poeSupportMap.getOrDefault(poeSupport, POESupport.DISABLED)) + .setMaxPowerConsumption(maxPower) + .setCurrentPowerConsumption(currentPower).build(); + } + + private Map processInterfaceStatus(String response) { + Map interfaceMap = new HashMap<>(); + Arrays.stream(response.split("\n")).filter(s -> !containsPrompt(s)).forEach(s -> { + for (Pattern pattern : interfaceProcessMap.keySet()) { + Matcher m = pattern.matcher(s); + if (m.find()) { + interfaceMap.put(interfaceProcessMap.get(pattern), m.group(1)); + } + } + }); + return interfaceMap; + } + + private Map processPowerStatusInline(String response) { + String filtered = Arrays.stream(response.split("\n")) + .filter(s -> s.trim().length() > 0 + && 
!s.contains("show power-inline") + && !containsPrompt(s) + && !s.contains("(mW)")) // AT shows mW in second line + .collect(Collectors.joining("\n")); + return mapSimpleTable(filtered, showPowerExpected, powerExpected); + } + + /** + * Handles the process when using the enter command. Enable is a required step before commands can + * be sent to the switch. + * + * @param consoleData Raw console data received the the telnet connection. + */ + public void handleEnableMessage(String consoleData) throws Exception { + if (containsPrompt(consoleData)) { + userEnabled = true; + } + } + + /** + * Handles the process when logging into the switch. + * + * @param consoleData Raw console data received the the telnet connection. + */ + public void handleLoginMessage(String consoleData) throws Exception { + if (consoleData.endsWith("login:")) { + telnetClientSocket.writeData(username + "\n"); + } else if (consoleData.contains("Password:")) { + telnetClientSocket.writeData(password + "\n"); + } else if (consoleData.contains(CONSOLE_PROMPT_ENDING_LOGIN)) { + userAuthorised = true; + hostname = consoleData.split(CONSOLE_PROMPT_ENDING_LOGIN)[0]; + telnetClientSocket.writeData("enable\n"); + } else if (consoleData.contains("Login incorrect")) { + telnetClientSocket.disposeConnection(); + throw new Exception("Failed to Login, Bad Password"); + } + } + +} diff --git a/usi/src/main/java/daq/usi/cisco/Cisco9300.java b/usi/src/main/java/daq/usi/cisco/Cisco9300.java new file mode 100644 index 0000000000..83f1d19550 --- /dev/null +++ b/usi/src/main/java/daq/usi/cisco/Cisco9300.java @@ -0,0 +1,289 @@ +package daq.usi.cisco; + +/* + * Licensed to Google under one or more contributor license agreements. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import daq.usi.ResponseHandler; +import daq.usi.SwitchController; +import grpc.Interface; +import grpc.LinkStatus; +import grpc.POEStatus; +import grpc.POESupport; +import grpc.Power; +import grpc.SwitchActionResponse; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.Map; +import java.util.Queue; +import java.util.stream.Collectors; + + +public class Cisco9300 extends SwitchController { + + private static final String[] interfaceExpected = + {"interface", "name", "status", "vlan", "duplex", "speed", "type"}; + private static final String[] showInterfaceExpected = + {"Port", "Name", "Status", "Vlan", "Duplex", "Speed", "Type"}; + private static final Map powerInlineMap = Map.of("Interface", "dev_interface", + "Inline Power Mode", "admin", + "Operational status", "oper", + "Measured at the port", "power", + "Device Type", "device", + "IEEE Class", "dev_class", + "Power available to the device", "max"); + private static final Map poeStatusMap = Map.of("on", POEStatus.ON, + "off", POEStatus.OFF, "fault", POEStatus.FAULT, "power-deny", POEStatus.DENY); + private static final Map poeSupportMap = Map.of("auto", POESupport.ENABLED, + "off", POESupport.DISABLED); + private static final int WAIT_MS = 100; + private ResponseHandler responseHandler; + + /** + * Cisco 9300 Switch Controller. 
+ * + * @param remoteIpAddress switch ip + * @param telnetPort switch telnet port + * @param user switch username + * @param password switch password + */ + public Cisco9300( + String remoteIpAddress, + int telnetPort, + String user, + String password) { + super(remoteIpAddress, telnetPort, user, password); + this.username = user == null ? "admin" : user; + this.password = password == null ? "password" : password; + } + + /** + * Generic Cisco Switch command to retrieve the Status of an interface. + */ + private String showIfaceStatusCommand(int interfacePort) { + return "show interface gigabitethernet1/0/" + interfacePort + " status"; + } + + /** + * Generic Cisco Switch command to retrieve the Power Status of an interface. Replace asterisk + * with actual port number for complete message + */ + private String showIfacePowerStatusCommand(int interfacePort) { + return "show power inline gigabitethernet1/0/" + interfacePort + " detail"; + } + + /** + * Get port toggle commands. + * + * @param interfacePort port number + * @param enabled for bringing up/down interfacePort + * @return commands + */ + private String[] portManagementCommand(int interfacePort, boolean enabled) { + return new String[] { + "configure terminal", + "interface FastEthernet0/" + interfacePort, + (enabled ? "no " : "") + "shutdown", + "end" + }; + } + + /** + * Handles the process when using the enter command. Enable is a required step before commands can + * be sent to the switch. + * + * @param consoleData Raw console data received the the telnet connection. 
+ */ + @Override + public void handleEnableMessage(String consoleData) throws Exception { + if (consoleData.contains("Password:")) { + telnetClientSocket.writeData(password + "\n"); + } else if (containsPrompt(consoleData)) { + userEnabled = true; + } else if (consoleData.contains("% Bad passwords")) { + telnetClientSocket.disposeConnection(); + throw new Exception("Could not Enable the User, Bad Password"); + } + } + + /** + * Handles the process when logging into the switch. + * + * @param consoleData Raw console data received the the telnet connection. + */ + @Override + public void handleLoginMessage(String consoleData) throws Exception { + if (consoleData.contains("Username:")) { + telnetClientSocket.writeData(username + "\n"); + } else if (consoleData.contains("Password:")) { + telnetClientSocket.writeData(password + "\n"); + } else if (consoleData.endsWith(CONSOLE_PROMPT_ENDING_LOGIN)) { + userAuthorised = true; + hostname = consoleData.split(CONSOLE_PROMPT_ENDING_LOGIN)[0]; + telnetClientSocket.writeData("enable\n"); + } else if (consoleData.contains("% Login invalid")) { + telnetClientSocket.disposeConnection(); + throw new Exception("Failed to Login, Login Invalid"); + } else if (consoleData.contains("% Bad passwords")) { + telnetClientSocket.disposeConnection(); + throw new Exception("Failed to Login, Bad Password"); + } + } + + /** + * Handles current data in the buffer read from the telnet console InputStream and sends it to the + * appropriate process. 
+ * + * @param consoleData Current unhandled data in the buffered reader + */ + @Override + public void parseData(String consoleData) throws Exception { + if (commandPending) { + responseHandler.receiveData(consoleData); + } + } + + @Override + public void getPower(int devicePort, ResponseHandler powerResponseHandler) + throws Exception { + while (commandPending) { + Thread.sleep(WAIT_MS); + } + String command = showIfacePowerStatusCommand(devicePort); + synchronized (this) { + commandPending = true; + responseHandler = data -> { + Map powerMap = processPowerStatusInline(data); + powerResponseHandler.receiveData(buildPowerResponse(powerMap)); + synchronized (this) { + commandPending = false; + } + }; + telnetClientSocket.writeData(command + "\n"); + } + } + + @Override + public void getInterface(int devicePort, ResponseHandler handler) throws Exception { + while (commandPending) { + Thread.sleep(WAIT_MS); + } + String command = showIfaceStatusCommand(devicePort); + synchronized (this) { + commandPending = true; + responseHandler = data -> { + Map interfaceMap = processInterfaceStatus(data); + handler.receiveData(buildInterfaceResponse(interfaceMap)); + synchronized (this) { + commandPending = false; + } + }; + telnetClientSocket.writeData(command + "\n"); + } + } + + private void managePort(int devicePort, ResponseHandler handler, + boolean enabled) throws Exception { + while (commandPending) { + Thread.sleep(WAIT_MS); + } + Queue commands = + new LinkedList<>(Arrays.asList(portManagementCommand(devicePort, enabled))); + SwitchActionResponse.Builder response = SwitchActionResponse.newBuilder(); + synchronized (this) { + commandPending = true; + responseHandler = data -> { + if (!commands.isEmpty()) { + telnetClientSocket.writeData(commands.poll() + "\n"); + return; + } + synchronized (this) { + commandPending = false; + handler.receiveData(response.setSuccess(true).build()); + } + }; + telnetClientSocket.writeData(commands.poll() + "\n"); + } + } + + @Override + 
public void connect(int devicePort, ResponseHandler handler) + throws Exception { + managePort(devicePort, handler, true); + } + + @Override + public void disconnect(int devicePort, ResponseHandler handler) + throws Exception { + managePort(devicePort, handler, false); + } + + private Interface buildInterfaceResponse(Map interfaceMap) { + Interface.Builder response = Interface.newBuilder(); + String duplex = interfaceMap.getOrDefault("duplex", ""); + if (duplex.startsWith("a-")) { // Interface in Auto Duplex + duplex = duplex.replaceFirst("a-", ""); + } + + String speed = interfaceMap.getOrDefault("speed", ""); + if (speed.startsWith("a-")) { // Interface in Auto Speed + speed = speed.replaceFirst("a-", ""); + } + + String linkStatus = interfaceMap.getOrDefault("status", ""); + return response.setLinkStatus(linkStatus.equals("connected") ? LinkStatus.UP : LinkStatus.DOWN) + .setDuplex(duplex) + .setLinkSpeed(Integer.parseInt(speed)) + .build(); + } + + private Power buildPowerResponse(Map powerMap) { + Power.Builder response = Power.newBuilder(); + float maxPower = Float.parseFloat(powerMap.get("max")); + float currentPower = Float.parseFloat(powerMap.get("power")); + + String poeSupport = powerMap.getOrDefault("admin", null); + String poeStatus = powerMap.getOrDefault("oper", null); + return response.setPoeStatus(poeStatusMap.getOrDefault(poeStatus, null)) + .setPoeSupport(poeSupportMap.getOrDefault(poeSupport, null)) + .setMaxPowerConsumption(maxPower) + .setCurrentPowerConsumption(currentPower).build(); + } + + private Map processInterfaceStatus(String response) { + String filtered = Arrays.stream(response.split("\n")) + .filter(s -> !containsPrompt(s)) + .collect(Collectors.joining("\n")); + return mapSimpleTable(filtered, showInterfaceExpected, interfaceExpected); + } + + private Map processPowerStatusInline(String response) { + Map powerMap = new HashMap<>(); + Arrays.stream(response.split("\n")) + .forEach( + line -> { + String[] lineParts = 
 line.trim().split(":"); + if (lineParts.length > 1) { + String powerMapKey = powerInlineMap.getOrDefault(lineParts[0], null); + if (powerMapKey != null) { + powerMap.put(powerMapKey, lineParts[1].trim()); + } + } + }); + return powerMap; + } + + +} diff --git a/usi/src/main/proto/usi.proto b/usi/src/main/proto/usi.proto new file mode 100644 index 0000000000..a85db75fbb --- /dev/null +++ b/usi/src/main/proto/usi.proto @@ -0,0 +1,79 @@ +/* + * Specification for Universal Switch Interface. + */ +syntax = "proto3"; +package usi; + +option java_multiple_files = true; +option java_outer_classname = "USIProto"; +option java_package = "grpc"; + +service USIService { + rpc GetPower(SwitchInfo) returns (Power) {} + rpc GetInterface(SwitchInfo) returns (Interface) {} + rpc disconnect(SwitchInfo) returns (SwitchActionResponse) {} + rpc connect(SwitchInfo) returns (SwitchActionResponse) {} +} + +message SwitchActionResponse { + bool success = 1; +} + +message Power { + float current_power_consumption = 1; + float max_power_consumption = 2; + POESupport poe_support = 3; + POEStatus poe_status = 4; +} + +message Interface { + LinkStatus link_status = 1; + int32 link_speed = 2; + string duplex = 3; +} + +enum SwitchModel { + ALLIED_TELESIS_X230 = 0; + CISCO_9300 = 1; +} + +enum LinkStatus { + UP = 0; + DOWN = 1; +} + +enum POESupport { + ENABLED = 0; + DISABLED = 1; +} + +enum POEStatus { + ON = 0; + OFF = 1; + FAULT = 2; + DENY = 3; +} + +/* + * System configuration of the access switch. This is used by the system + * to setup and configure the switch itself. + */ +message SwitchInfo { + // IP address of external switch. 
+ string ip_addr = 1; + + // Telnet Port + int32 telnet_port = 2; + + // Device Port + int32 device_port = 3; + + // Switch model + SwitchModel model = 4; + + // Switch connect username + string username = 5; + + // Switch connect password + string password = 6; +} diff --git a/usi/src/test/java/daq/usi/SwitchControllerTest.java b/usi/src/test/java/daq/usi/SwitchControllerTest.java new file mode 100644 index 0000000000..656f7e0c5e --- /dev/null +++ b/usi/src/test/java/daq/usi/SwitchControllerTest.java @@ -0,0 +1,47 @@ +package daq.usi; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; + +import java.util.Map; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class SwitchControllerTest { + + @BeforeEach + void setUp() { + } + + @AfterEach + void tearDown() { + } + + @Test + void mapSimpleTableEmptyInput() { + String raw = ""; + String[] colNames = {"a", "b"}; + String[] mapNames = {"a", "b"}; + Map response = SwitchController.mapSimpleTable(raw, colNames, mapNames); + for (String key : response.keySet()) { + assertNull(response.get(key)); + } + } + + @Test + void mapSimpleTableSampleInputAT() { + String raw = "Interface Admin Pri Oper Power Device Class Max \n" + + "port1.0.1 Enabled Low Powered 3337 n/a 0 15400 [C]"; + String[] colNames = {"Interface", "Admin", "Pri", "Oper", "Power", "Device", "Class", "Max"}; + String[] mapNames = {"interface", "admin", "pri", "oper", "power", "device", "class", "max"}; + Map expected = Map.of("interface", "port1.0.1", "admin", "Enabled", "pri", + "Low", "oper", "Powered", "power", "3337", "device", "n/a", + "class", "0", "max", "15400 [C]"); + Map response = SwitchController.mapSimpleTable(raw, colNames, mapNames); + for (String key : response.keySet()) { + assertEquals(response.get(key), expected.get(key)); + } + } + +} \ No newline at end of file diff --git a/usi/start b/usi/start 
new file mode 100755 index 0000000000..4c8350ff73 --- /dev/null +++ b/usi/start @@ -0,0 +1,2 @@ +#!/bin/bash -e +java -jar usi/target/usi-0.0.1-jar-with-dependencies.jar From f211531d639fc6ed6fe57e5917f9b98b394c74b5 Mon Sep 17 00:00:00 2001 From: Noureddine Date: Mon, 29 Jun 2020 13:48:42 +0100 Subject: [PATCH 06/38] Update Minimum Send Test (#498) * complete changes to min_send test --- subset/network/network_tests.py | 102 +++++++++++++++++++++++++++++--- 1 file changed, 93 insertions(+), 9 deletions(-) diff --git a/subset/network/network_tests.py b/subset/network/network_tests.py index 6dbaca3f85..d785debc67 100644 --- a/subset/network/network_tests.py +++ b/subset/network/network_tests.py @@ -1,5 +1,8 @@ import subprocess, time, sys, json +import re +import datetime + arguments = sys.argv test_request = str(arguments[1]) @@ -24,13 +27,18 @@ description_communication_type = 'Device sends unicast or broadcast packets.' description_ntp_support = 'Device sends NTP request packets.' -tcpdump_display_all_packets = 'tcpdump -n src host ' + device_address + ' -r ' + cap_pcap_file +tcpdump_display_all_packets = 'tcpdump -tttt -n src host ' + device_address + ' -r ' + cap_pcap_file tcpdump_display_udp_bacnet_packets = 'tcpdump -n udp dst portrange 47808-47809 -r ' + cap_pcap_file -tcpdump_display_arp_packets = 'tcpdump arp -r ' + cap_pcap_file +tcpdump_display_arp_packets = 'tcpdump arp -n src host ' + device_address + ' -r ' + cap_pcap_file tcpdump_display_ntp_packets = 'tcpdump dst port 123 -r ' + cap_pcap_file tcpdump_display_eapol_packets = 'tcpdump port 1812 or port 1813 or port 3799 -r ' + cap_pcap_file tcpdump_display_broadcast_packets = 'tcpdump broadcast and src host ' + device_address + ' -r ' + cap_pcap_file +system_conf_file = "/config/inst/system.conf" +tcpdump_date_format = "%Y-%m-%d %H:%M:%S.%f" +min_send_seconds = 300 +min_send_duration = "5 minutes" + def write_report(string_to_append): print(string_to_append.strip()) with open(report_filename, 'a+') as 
file_open: @@ -49,7 +57,7 @@ def add_packet_count_to_report(packet_type, packet_count): write_report("{i} {t} Packets recieved={p}\n".format(i=ignore, t=packet_type, p=packet_count)) def add_packet_info_to_report(packets_received): - packet_list = packets_received.rstrip().split("\n") + packet_list = packets_received.strip().split("\n") outnum = min(len(packet_list), max_packets_in_report) for x in range(0, outnum): write_report("{i} {p}\n".format(i=ignore, p=packet_list[x])) @@ -99,19 +107,95 @@ def decode_json_config(config_file, map_name, action): elif action == 'remove': remove_from_port_list(port_map) + +def get_scan_length(config_file): + """ Gets length of the monitor.pcap scan + + Reads the system.conf file and returns the length of the monitor_scan + + Args: + config_file: Location of system.conf file within test container + + Returns: + Length of monitor scan in seconds + + If not defined, or system.conf could not be found + returns false + """ + + scan_length = False + try: + with open(config_file) as file: + for line in file: + match = re.search(r'^monitor_scan_sec=(\d+)', line) + if match: + matched_length = int(match.group(1)) + # If scan length = 0 or not found, then monitor scan does not exist + scan_length = matched_length if matched_length > 0 else False + return scan_length + except Exception as e: + write_report("Error encountered reading system.conf {}".format(e)) + return False + def test_connection_min_send(): + """ Runs the connection.min_send test + + Tests if the device sends data packets of any type (inc data, NTP, etc) + within a period of 5 minutes by looking through the monitor.pcap file + + The length of test can be configured using the min_send_seconds variable + at the start of the file + """ + + # Get scan length + scan_length = get_scan_length(system_conf_file) + min_send_delta = datetime.timedelta(seconds=min_send_seconds) + min_send_pass = False + + # The test scans the monitor.pcap, so if it's not found skip + if not 
 scan_length: + add_summary("DAQ monitor scan not running, test skipped") + return 'skip' + arp_shell_result = shell_command_with_result(tcpdump_display_arp_packets, 0, False) arp_packets_received = packets_received_count(arp_shell_result) if arp_packets_received > 0: add_summary("ARP packets received.") + shell_result = shell_command_with_result(tcpdump_display_all_packets, 0, False) - all_packets_received = packets_received_count(shell_result) - app_packets_received = all_packets_received - arp_packets_received - if app_packets_received > 0: - add_summary("Other packets received.") - print('min_send_packets', arp_packets_received, all_packets_received) + all_packets = shell_result.splitlines() + + # Loop through tcpdump result and measure the time between successive packets + for i, packet in enumerate(all_packets): + # datetime is the first 26 characters of the line + packet_time = datetime.datetime.strptime(packet[:26], tcpdump_date_format) + + if i == 0: + previous_packet_time = packet_time + continue + + delta = packet_time - previous_packet_time + if delta < min_send_delta: + min_send_pass = True + break + + previous_packet_time = packet_time + add_packet_info_to_report(shell_result) - return 'pass' if app_packets_received > 0 else 'fail' + + if not min_send_pass: + if scan_length > min_send_seconds: + add_summary('Data packets were not sent at a frequency less than ' + + min_send_duration) + return 'fail' + else: + add_summary('Please set DAQ monitor scan to be greater than ' + + min_send_duration) + return 'skip' + + add_summary('Data packets were sent at a frequency of less than ' + + min_send_duration) + return 'pass' def test_connection_dhcp_long(): shell_result = shell_command_with_result(tcpdump_display_arp_packets, 0, False) From 4e339824d263b8d5a698b0aef2d2f59c6480f25c Mon Sep 17 00:00:00 2001 From: Trevor Pering Date: Tue, 30 Jun 2020 08:06:05 -0700 Subject: [PATCH 07/38] Minor UDMI updates for pubber keygen --- pubber/bin/keygen | 33 
++++++++----------- .../main/java/daq/pubber/MqttPublisher.java | 4 ++- 2 files changed, 17 insertions(+), 20 deletions(-) diff --git a/pubber/bin/keygen b/pubber/bin/keygen index 1b3cf60d67..5ee3166d16 100755 --- a/pubber/bin/keygen +++ b/pubber/bin/keygen @@ -1,26 +1,21 @@ #!/bin/bash -e -ROOT=$(realpath $(dirname $0)/../..) -cd $ROOT - -TARGET_PREFIX=local/rsa_ - -PUBLIC_CERT=${TARGET_PREFIX}cert.pem -PRIVATE_CERT=${TARGET_PREFIX}private.pem -PRIVATE_KEY=${TARGET_PREFIX}private.pkcs8 - -if [ -f $PUBLIC_CERT ]; then - echo $PUBLIC_CERT already exists, exiting. +if [ "$#" != 2 ]; then + echo $0 [type] [out_dir] false fi -if [ -f $PRIVATE_CERT ]; then - echo $PRIVATE_CERT already exists, exiting. - false -fi -if [ -f $PRIVATE_KEY ]; then - echo $PRIVATE_KEY already exists, exiting. + +type=$1 +cd $2 + +if [ $type == RS256 ]; then + openssl genrsa -out rsa_private.pem 2048 + openssl rsa -in rsa_private.pem -pubout -out rsa_public.pem +elif [ $type == RS256_X509 ]; then + openssl req -x509 -nodes -newkey rsa:2048 -keyout rsa_private.pem -days 1000000 -out rsa_cert.pem -subj "/CN=unused" +else + echo Unknown key type $type. 
Try one of { RS256, RS256_X509 } false fi -openssl req -x509 -nodes -newkey rsa:2048 -keyout $PRIVATE_CERT -days 1000000 -out $PUBLIC_CERT -subj "/CN=unused" -openssl pkcs8 -topk8 -inform PEM -outform DER -in $PRIVATE_CERT -nocrypt > $PRIVATE_KEY +openssl pkcs8 -topk8 -inform PEM -outform DER -in rsa_private.pem -nocrypt > rsa_private.pkcs8 diff --git a/pubber/src/main/java/daq/pubber/MqttPublisher.java b/pubber/src/main/java/daq/pubber/MqttPublisher.java index 9358ee3546..ea32788d30 100644 --- a/pubber/src/main/java/daq/pubber/MqttPublisher.java +++ b/pubber/src/main/java/daq/pubber/MqttPublisher.java @@ -155,7 +155,9 @@ private MqttClient newMqttClient(String deviceId) { try { Preconditions.checkNotNull(registryId, "registryId is null"); Preconditions.checkNotNull(deviceId, "deviceId is null"); - MqttClient mqttClient = new MqttClient(getBrokerUrl(), getClientId(deviceId), + String clientId = getClientId(deviceId); + LOG.info("Creating new mqtt client for " + clientId); + MqttClient mqttClient = new MqttClient(getBrokerUrl(), clientId, new MemoryPersistence()); return mqttClient; } catch (Exception e) { From 970c9d6b0b13a87de699d93ef66585202edb8b3b Mon Sep 17 00:00:00 2001 From: pbatta Date: Tue, 30 Jun 2020 13:44:07 -0700 Subject: [PATCH 08/38] add check for git version tag in Travis (#519) --- bin/test_daq | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/bin/test_daq b/bin/test_daq index 279e3bc53b..779cef8998 100755 --- a/bin/test_daq +++ b/bin/test_daq @@ -31,6 +31,16 @@ echo -n "DAQ version " git describe --dirty --always echo +TAGGED_VERSION=`cat etc/docker_images.ver` +if ! git show $TAGGED_VERSION > /dev/null; then + echo + echo Tagged version $TAGGED_VERSION not found. + echo Maybe you need to fetch tags: git fetch --tags. + echo If this is on Travis, ensure tags were pushed to your repo. 
+ echo + false +fi + if [ -d faucet ]; then echo -n "Last FAUCET commit " (cd $FAUCET; git log -n 1 --pretty=format:"%h - %an, %ar : %s" || true) From 70bc4f74bc51dfebd6d95338e02524202e6e4ea9 Mon Sep 17 00:00:00 2001 From: Nour Date: Sat, 4 Jul 2020 17:25:05 +0100 Subject: [PATCH 09/38] add ssh test --- subset/security/build.conf | 1 + subset/security/readme.md | 8 ++++++++ subset/security/test_ssh | 42 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+) create mode 100755 subset/security/test_ssh diff --git a/subset/security/build.conf b/subset/security/build.conf index 763d155e46..5b7e9ed28d 100644 --- a/subset/security/build.conf +++ b/subset/security/build.conf @@ -1,3 +1,4 @@ build subset/security add tls add password +add ssh \ No newline at end of file diff --git a/subset/security/readme.md b/subset/security/readme.md index a5f65cf277..2143155dc0 100644 --- a/subset/security/readme.md +++ b/subset/security/readme.md @@ -43,3 +43,11 @@ The functional test code is included in the `tlstest/src/main/java` folder. - pass -> If the device responds to a connection with TLS 1.3 support and provides a valid certificate. - fail -> If the device responds to a connection with TLS 1.3 support and provides an invalid certificate. - skip -> If no connection to the device can be established. 
+ +## test_ssh +The SSH test will check that if a device has an SSH server, it only supports SSHv2 + +### Conditions for security.ssh.version +- pass -> If the device runs an SSH server which only supports SSHv2 +- fail -> If the device runs an SSH server which supports SSHv1 +- skip -> If the device does not run an SSH server \ No newline at end of file diff --git a/subset/security/test_ssh b/subset/security/test_ssh new file mode 100755 index 0000000000..f1439a1108 --- /dev/null +++ b/subset/security/test_ssh @@ -0,0 +1,42 @@ +#!/bin/bash +# +# Checks if a device only support SSHv2 +# Runs NMAP script to check if a device runs + +source reporting.sh + +TEST_NAME="security.ssh.version" +TEST_DESCRIPTION="Check that device only support SSHv2" + +LOG="tmp/nmap_log.txt" + +nmap -sV -sC -oN $LOG $TARGET_IP + +nmap_log=$(cat $LOG ) + +sshv1=$(grep 'sshv1: Server supports SSHv1' $LOG) + +if [[ -z "${sshv1}" ]]; then + #No SSHv1, but is there an SSHv2 running ? + sshv2=$(grep -P '^\d+\/tcp\s+open ssh.*protocol 2.0\)$' $LOG) + + if [[ -z "${sshv2}" ]]; then + test_outcome="skip" + test_summary="Device is not running an SSH server" + else + test_outcome="pass" + test_summary="Device only supports SSHv2" + fi + +else + test_outcome="fail" + test_summary="Device supports SSHv1" +fi + +result_and_summary="RESULT ${test_outcome} ${TEST_NAME} ${test_summary}" + +write_out_result $REPORT \ + "$TEST_NAME" \ + "$TEST_DESCRIPTION" \ + "$nmap_log" \ + "$result_and_summary" \ No newline at end of file From 6ad2c1ac39eee2f6ec32cabc03cf5bfa9a573598 Mon Sep 17 00:00:00 2001 From: Nour Date: Sat, 4 Jul 2020 18:17:40 +0100 Subject: [PATCH 10/38] add ssh test dockerfile --- subset/security/Dockerfile.test_ssh | 7 +++++++ subset/security/test_ssh | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 subset/security/Dockerfile.test_ssh diff --git a/subset/security/Dockerfile.test_ssh b/subset/security/Dockerfile.test_ssh new file mode 100644 index 0000000000..aa701b5550 
--- /dev/null +++ b/subset/security/Dockerfile.test_ssh @@ -0,0 +1,7 @@ +FROM daqf/aardvark:latest + +RUN $AG update && $AG install nmap + +COPY subset/security/test_ssh . + +CMD ./test_ssh diff --git a/subset/security/test_ssh b/subset/security/test_ssh index f1439a1108..cdcbed097f 100755 --- a/subset/security/test_ssh +++ b/subset/security/test_ssh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash -e # # Checks if a device only support SSHv2 # Runs NMAP script to check if a device runs From 80c855236d13a1a5d9354d138109b70ba446d209 Mon Sep 17 00:00:00 2001 From: Nour Date: Sat, 4 Jul 2020 21:32:15 +0100 Subject: [PATCH 11/38] add ssh option to faux, fixes --- docker/include/bin/start_faux | 6 ++++++ subset/security/test_ssh | 16 +++++++++------- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/docker/include/bin/start_faux b/docker/include/bin/start_faux index 94c9711ab3..63a3a95792 100755 --- a/docker/include/bin/start_faux +++ b/docker/include/bin/start_faux @@ -126,6 +126,12 @@ if [ -n "${options[telnet]}" ]; then (while true; do echo Telnet `hostname`; nc -nvlt -p 23 -e `which hostname`; done) & fi +# SSH server is installed but not running +if [ -n "${options[ssh]}" ]; then + echo Starting SSH server + service ssh start +fi + if [ -n "${options[bacnet]}" ]; then echo Starting bacnet loop device. 
java -cp bacnetTests/build/libs/bacnet-1.0-SNAPSHOT-all.jar \ diff --git a/subset/security/test_ssh b/subset/security/test_ssh index cdcbed097f..ede82f4f0b 100755 --- a/subset/security/test_ssh +++ b/subset/security/test_ssh @@ -1,14 +1,15 @@ -#!/bin/bash -e +#!/bin/bash # # Checks if a device only support SSHv2 -# Runs NMAP script to check if a device runs +# Runs NMAP to check if SSH is available +# Uses the 'sshv_1' nmap script to check if the server supports SSHv1 source reporting.sh TEST_NAME="security.ssh.version" TEST_DESCRIPTION="Check that device only support SSHv2" - -LOG="tmp/nmap_log.txt" +REPORT=/tmp/report.txt +LOG=/tmp/nmap_log.txt nmap -sV -sC -oN $LOG $TARGET_IP @@ -17,9 +18,9 @@ nmap_log=$(cat $LOG ) sshv1=$(grep 'sshv1: Server supports SSHv1' $LOG) if [[ -z "${sshv1}" ]]; then - #No SSHv1, but is there an SSHv2 running ? + #No SSHv1, but is there an SSHv2 server running ? sshv2=$(grep -P '^\d+\/tcp\s+open ssh.*protocol 2.0\)$' $LOG) - + if [[ -z "${sshv2}" ]]; then test_outcome="skip" test_summary="Device is not running an SSH server" @@ -39,4 +40,5 @@ write_out_result $REPORT \ "$TEST_NAME" \ "$TEST_DESCRIPTION" \ "$nmap_log" \ - "$result_and_summary" \ No newline at end of file + "$result_and_summary" + \ No newline at end of file From d29d0383223f87010924b95844746451aa6181fa Mon Sep 17 00:00:00 2001 From: Nour Date: Sun, 5 Jul 2020 22:26:13 +0100 Subject: [PATCH 12/38] fix ssh test, travis results --- docs/device_report.md | 43 ++++++++++++++++++++++++-- resources/test_site/module_config.json | 3 ++ subset/security/test_ssh | 4 +-- testing/test_aux.out | 11 ++++++- testing/test_aux.sh | 3 +- 5 files changed, 57 insertions(+), 7 deletions(-) diff --git a/docs/device_report.md b/docs/device_report.md index 2ab3ab3320..ea6c43a5ca 100644 --- a/docs/device_report.md +++ b/docs/device_report.md @@ -56,7 +56,7 @@ Overall device result FAIL |---|---|---|---|---| |Required|1|0|0|0| |Recommended|2|0|0|0| -|Other|1|2|22|2| +|Other|3|2|21|2| 
|Result|Test|Category|Expectation|Notes| |---|---|---|---|---| @@ -79,9 +79,10 @@ Overall device result FAIL |skip|security.firmware|Other|Other|Could not retrieve a firmware version with nmap. Check bacnet port.| |skip|security.passwords.http|Other|Other|Port 80 is not open on target device.| |skip|security.passwords.https|Other|Other|Port 443 is not open on target device.| -|skip|security.passwords.ssh|Other|Other|Port 22 is not open on target device.| +|pass|security.passwords.ssh|Other|Other|Default passwords have been changed.| |skip|security.passwords.telnet|Other|Other|Port 23 is not open on target device.| |pass|security.ports.nmap|Security|Recommended|Only allowed ports found open.| +|pass|security.ssh.version|Other|Other|Device only supports SSHv2| |skip|security.tls.v1|Other|Other|IOException unable to connect to server| |skip|security.tls.v1.x509|Other|Other|IOException unable to connect to server| |skip|security.tls.v1_2|Other|Other|IOException unable to connect to server| @@ -420,6 +421,7 @@ Nmap scan report for daq-faux-1 (X.X.X.X) Host is up (XXX). PORT STATE SERVICE +22/tcp open ssh 10000/tcp open snet-sensor-mgmt MAC Address: 9A:02:57:1E:8F:01 (Unknown) @@ -442,6 +444,7 @@ Nmap scan report for daq-faux-1 (X.X.X.X) Host is up (XXX). PORT STATE SERVICE +22/tcp open ssh 10000/tcp open snet-sensor-mgmt MAC Address: 9A:02:57:1E:8F:01 (Unknown) @@ -464,6 +467,7 @@ Nmap scan report for daq-faux-1 (X.X.X.X) Host is up (XXX). PORT STATE SERVICE +22/tcp open ssh 10000/tcp open snet-sensor-mgmt MAC Address: 9A:02:57:1E:8F:01 (Unknown) @@ -486,14 +490,47 @@ Nmap scan report for daq-faux-1 (X.X.X.X) Host is up (XXX). PORT STATE SERVICE +22/tcp open ssh 10000/tcp open snet-sensor-mgmt MAC Address: 9A:02:57:1E:8F:01 (Unknown) Nmap done: 1 IP address (1 host up) scanned in XXX nmap X.X.X.X +Starting brute force... 
+hydra -L /tmp/ssh_usernames.txt -P /tmp/ssh_passwords.txt X.X.X.X ssh -s 22 +Hydra v8.6 (c) 2017 by van Hauser/THC - Please do not use in military or secret service organizations, or for illegal purposes. + +Hydra (http://www.thc.org/thc-hydra) starting at XXX +[DATA] max 6 tasks per 1 server, overall 6 tasks, 6 login tries (l:2/p:3), ~1 try per task +[DATA] attacking ssh://X.X.X.X:22/ +1 of 1 target completed, 0 valid passwords found +Hydra (http://www.thc.org/thc-hydra) finished at XXX Done. -------------------- -RESULT skip security.passwords.ssh Port 22 is not open on target device. +RESULT pass security.passwords.ssh Default passwords have been changed. + +``` + +#### Module Config + +|Attribute|Value| +|---|---| +|enabled|True| + +## Module ssh + + +#### Report + +``` +-------------------- +security.ssh.version +-------------------- +Check that device only support SSHv2 +-------------------- +22/tcp open ssh OpenSSH 7.6p1 Ubuntu 4ubuntu0.3 (Ubuntu Linux; protocol 2.0) +-------------------- +RESULT pass security.ssh.version Device only supports SSHv2 ``` diff --git a/resources/test_site/module_config.json b/resources/test_site/module_config.json index 4d672b67aa..8b7c157ed4 100644 --- a/resources/test_site/module_config.json +++ b/resources/test_site/module_config.json @@ -17,6 +17,9 @@ }, "manual": { "enabled": true + }, + "ssh": { + "enabled": true } }, "process": { diff --git a/subset/security/test_ssh b/subset/security/test_ssh index ede82f4f0b..e1af63282c 100755 --- a/subset/security/test_ssh +++ b/subset/security/test_ssh @@ -11,7 +11,7 @@ TEST_DESCRIPTION="Check that device only support SSHv2" REPORT=/tmp/report.txt LOG=/tmp/nmap_log.txt -nmap -sV -sC -oN $LOG $TARGET_IP +nmap -sV -sC $TARGET_IP > $LOG nmap_log=$(cat $LOG ) @@ -39,6 +39,6 @@ result_and_summary="RESULT ${test_outcome} ${TEST_NAME} ${test_summary}" write_out_result $REPORT \ "$TEST_NAME" \ "$TEST_DESCRIPTION" \ - "$nmap_log" \ + "$sshv2" \ "$result_and_summary" \ No newline at end of 
file diff --git a/testing/test_aux.out b/testing/test_aux.out index acf12f7aa7..98d51c0919 100644 --- a/testing/test_aux.out +++ b/testing/test_aux.out @@ -46,7 +46,7 @@ RESULT pass security.tls.v1_3.x509 Certificate active for current date and publi RESULT skip security.passwords.http Port 80 is not open on target device. RESULT skip security.passwords.https Port 443 is not open on target device. RESULT skip security.passwords.telnet Port 23 is not open on target device. -RESULT skip security.passwords.ssh Port 22 is not open on target device. +RESULT pass security.passwords.ssh Default passwords have been changed. RESULT fail security.passwords.http Default passwords have not been changed. RESULT fail security.passwords.https Default passwords have not been changed. RESULT fail security.passwords.telnet Default passwords have not been changed. @@ -57,6 +57,9 @@ RESULT pass security.passwords.telnet Default passwords have been changed. RESULT pass security.passwords.ssh Default passwords have been changed. RESULT skip security.firmware Could not retrieve a firmware version with nmap. Check bacnet port. 
RESULT pass security.firmware version found: ?\xFF\xFF\x19,>u\x08\x00no +RESULT pass security.ssh.version Device only supports SSHv2 +RESULT pass security.ssh.version Device only supports SSHv2 +RESULT pass security.ssh.version Device only supports SSHv2 dhcp requests 1 1 0 1 01: [] 02: ['02:macoui:TimeoutError', '02:ping:TimeoutError'] @@ -111,6 +114,9 @@ port-01 module_config modules "password": { "enabled": true }, + "ssh": { + "enabled": true + }, "switch": { "enabled": true, "poe": { @@ -160,6 +166,9 @@ port-02 module_config modules "password": { "enabled": true }, + "ssh": { + "enabled": true + }, "switch": { "enabled": true }, diff --git a/testing/test_aux.sh b/testing/test_aux.sh index ebf75ee79c..0c0a384fa0 100755 --- a/testing/test_aux.sh +++ b/testing/test_aux.sh @@ -62,7 +62,7 @@ site_path: inst/test_site schema_path: schemas/udmi interfaces: faux-1: - opts: brute broadcast_client ntp_pass + opts: brute broadcast_client ntp_pass ssh faux-2: opts: nobrute expiredtls bacnetfail pubber passwordfail ntp_fail opendns faux-3: @@ -114,6 +114,7 @@ capture_test_results tls capture_test_results password capture_test_results discover capture_test_results network +capture_test_results ssh # Capture peripheral logs more inst/run-port-*/scans/ip_triggers.txt | cat From 83f83757617c5bd64c498ea8d4fa46ee4ed19f9e Mon Sep 17 00:00:00 2001 From: Trevor Date: Mon, 6 Jul 2020 18:08:20 -0700 Subject: [PATCH 13/38] Add DAQ version to origin summary (#522) --- firebase/functions/index.js | 13 ++++++---- firebase/public/index.html | 14 +++++------ firebase/public/main.js | 48 ++++++++++++++++++++++--------------- 3 files changed, 45 insertions(+), 30 deletions(-) diff --git a/firebase/functions/index.js b/firebase/functions/index.js index f17a3d2c6c..12b6a11875 100644 --- a/firebase/functions/index.js +++ b/firebase/functions/index.js @@ -123,7 +123,7 @@ function handleTestResult(origin, siteName, message) { const deviceDoc = 
originDoc.collection('device').doc(message.device_id); const updates = [ - originDoc.set({ 'updated': timestamp }), + originDoc.set({ 'updated': timestamp }, { merge: true }), siteDoc.set({ 'updated': timestamp }), portDoc.set({ 'updated': timestamp }), deviceDoc.set({ 'updated': timestamp }) @@ -193,17 +193,22 @@ function handleTestResult(origin, siteName, message) { function handleHeartbeat(origin, message) { const timestamp = new Date().toJSON(); const originDoc = db.collection('origin').doc(origin); - console.log('heartbeat', timestamp, origin) + console.log('heartbeat', timestamp, origin, message) const heartbeatDoc = originDoc.collection('runner').doc('heartbeat'); return Promise.all([ - originDoc.set({ 'updated': timestamp }), + originDoc.set({ + 'updated': timestamp, + 'version': message.version + }), heartbeatDoc.get().then((result) => { const current = result.data(); - if (!current || !current.message || current.message.timestamp < message.timestamp) + const defined = current && current.message && current.message.timestamp; + if (!defined || current.message.timestamp < message.timestamp) { return heartbeatDoc.set({ 'updated': timestamp, message }); + } }) ]); } diff --git a/firebase/public/index.html b/firebase/public/index.html index d9ce546c59..c4928cd090 100644 --- a/firebase/public/index.html +++ b/firebase/public/index.html @@ -31,14 +31,14 @@

Filters:

-
- - -
+
+ + +
@@ -52,10 +52,10 @@

Filters:

-

Sites

-

Origins

+

Sites

+

Users

@@ -84,4 +84,4 @@

Users

if (typeof daq_deploy_version !== 'undefined') { document.getElementById('deploy-version').innerHTML = daq_deploy_version; } - \ No newline at end of file + diff --git a/firebase/public/main.js b/firebase/public/main.js index 09f4237a5e..a6eeee087f 100644 --- a/firebase/public/main.js +++ b/firebase/public/main.js @@ -8,11 +8,6 @@ const display_columns = []; const display_rows = []; const row_timestamps = {}; -const data_state = {}; - -let last_result_time_sec = 0; -let heartbeatTimestamp = 0; - const origin_id = getQueryParam('origin'); const site_name = getQueryParam('site'); const port_id = getQueryParam('port'); @@ -21,8 +16,13 @@ const device_id = getQueryParam('device'); const run_id = getQueryParam('runid'); const from = getQueryParam('from'); const to = getQueryParam('to'); + +const data_state = {}; +let last_result_time_sec = 0; +let heartbeatTimestamp = 0; var db; -var activePorts = []; +var activePorts = new Set(); + document.addEventListener('DOMContentLoaded', () => { db = firebase.firestore(); const settings = { @@ -289,7 +289,7 @@ function watcherAdd(ref, collection, limit, handler) { }, (e) => console.error(e)); } -function listSites(db) { +function listSites() { const linkGroup = document.querySelector('#listings .sites'); db.collection('site').get().then((snapshot) => { snapshot.forEach((site_doc) => { @@ -303,21 +303,31 @@ function listSites(db) { }).catch((e) => statusUpdate('registry list error', e)); } -function listOrigins(db) { - const linkGroup = document.querySelector('#listings .origins'); +function addOrigin(originId) { + db.collection('origin').doc(originId).get().then((result) => { + const linkGroup = document.querySelector('#listings .origins'); + const originLink = document.createElement('a'); + originLink.setAttribute('href', '/?origin=' + originId); + originLink.innerHTML = originId; + linkGroup.appendChild(originLink); + const originInfo = document.createElement('span'); + const version = result.data() && result.data().version; + 
const updated = result.data() && result.data().updated; + originInfo.innerHTML = ` ${version}, ${updated}`; + linkGroup.appendChild(originInfo); + linkGroup.appendChild(document.createElement('p')); + }); +} + +function listOrigins() { db.collection('origin').get().then((snapshot) => { snapshot.forEach((originDoc) => { - const origin = originDoc.id; - const originLink = document.createElement('a'); - originLink.setAttribute('href', '/?origin=' + origin); - originLink.innerHTML = origin; - linkGroup.appendChild(originLink); - linkGroup.appendChild(document.createElement('p')); + addOrigin(originDoc.id); }); }).catch((e) => statusUpdate('origin list error', e)); } -function listUsers(db) { +function listUsers() { const link_group = document.querySelector('#listings .users'); db.collection('users').get().then((snapshot) => { snapshot.forEach((user_doc) => { @@ -354,9 +364,9 @@ function dashboardSetup() { triggerOrigin(db, origin_id); } else { document.getElementById('listings').classList.add('active'); - listSites(db); - listOrigins(db); - listUsers(db); + listOrigins(); + listSites(); + listUsers(); } return origin_id; From 06e4660f16a1611b445cc95fa10838f82e73cfb3 Mon Sep 17 00:00:00 2001 From: Puneet Date: Mon, 6 Jul 2020 23:51:17 -0700 Subject: [PATCH 14/38] 1.7.0 release --- docs/changelog.md | 10 +++++++-- etc/docker_images.txt | 48 +++++++++++++++++++++---------------------- etc/docker_images.ver | 2 +- 3 files changed, 33 insertions(+), 27 deletions(-) diff --git a/docs/changelog.md b/docs/changelog.md index c27652fa90..e3a796f4a1 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -1,11 +1,17 @@ # Changelog +* 1.7.0 + * Add DAQ version to origin summary (#522) + * Add check for git version tag in Travis (#519) + * Minor UDMI updates for pubber keygen + * Update Minimum Send Test (#498) + * Universal Switch Interface (USI) (#496) * 1.6.1 - * fix image pull in cmd/build (#503) + * fix image pull in cmd/build (#503) * 1.6.0 * cloud test setup documentation 
(#495) * Baseline for NTP tests (#494) * Baseline for DNS test (#492) - * Add manual test summary to test report (#481) + * Add manual test summary to test report (#481) * UDMI logentry schema update (#391) * 1.5.1 * Fix for local-port-as-string issue (#477) diff --git a/etc/docker_images.txt b/etc/docker_images.txt index 6260cbd020..78ef0f7a7d 100644 --- a/etc/docker_images.txt +++ b/etc/docker_images.txt @@ -1,24 +1,24 @@ -daqf/aardvark df091e4d5825 -daqf/default d2544deaa6e1 -daqf/faucet b5fef9b579ff -daqf/faux1 221259034e61 -daqf/faux2 e6beb911f3eb -daqf/gauge 9cd676c425de -daqf/networking 410cc21c55fa -daqf/switch 2229d5b7071c -daqf/test_bacext ddfb25affc3a -daqf/test_bacnet c8aa2fc90b87 -daqf/test_brute 09e833bf46f0 -daqf/test_discover 6126fe95e495 -daqf/test_fail 4c4df8d524fa -daqf/test_hold 4a8f5ab032be -daqf/test_macoui 5925b633d2f5 -daqf/test_manual 10ad9b86ec3e -daqf/test_mudgee 370369e124a8 -daqf/test_nmap 393fd39e9b0f -daqf/test_pass 0f8e341c292b -daqf/test_password 5be042269e32 -daqf/test_ping 486bb80e9dd6 -daqf/test_switch 082159ca2f27 -daqf/test_tls d2320f042174 -daqf/test_udmi 07efea3d4641 +daqf/aardvark 13e07616906a +daqf/default 8547decf4b0c +daqf/faucet 0bd65761a824 +daqf/faux1 500fc556e362 +daqf/faux2 65be2e8aaff5 +daqf/gauge 399cf3f0cf26 +daqf/networking e7d0b7cea324 +daqf/switch 0a7b905f10fa +daqf/test_bacext 765b3fd4f471 +daqf/test_bacnet 1cdac0876850 +daqf/test_brute 9d046780449f +daqf/test_discover 6bb39aebc6d9 +daqf/test_fail 21b8d383d676 +daqf/test_hold 2c2dbda2fb23 +daqf/test_macoui 890bc044e327 +daqf/test_manual 156a1947c7f4 +daqf/test_mudgee 44a4ad7a9615 +daqf/test_nmap 6e97b5498219 +daqf/test_pass 95e9680cef60 +daqf/test_password 1bc14db7767e +daqf/test_ping 45e3f58e30a2 +daqf/test_switch 57cf3951b2e3 +daqf/test_tls f93b7fec95a4 +daqf/test_udmi 771e5969564d diff --git a/etc/docker_images.ver b/etc/docker_images.ver index 9c6d6293b1..bd8bf882d0 100644 --- a/etc/docker_images.ver +++ b/etc/docker_images.ver @@ -1 +1 @@ -1.6.1 +1.7.0 
From 499bff98914abe5a5c407b7f21502e57efc75b33 Mon Sep 17 00:00:00 2001 From: henry54809 Date: Tue, 7 Jul 2020 13:09:06 -0700 Subject: [PATCH 15/38] Feature/usi OVS switch (#521) --- usi/pom.xml | 2 +- .../java/daq/usi/BaseSwitchController.java | 169 +++++++++++++++ .../main/java/daq/usi/SwitchController.java | 196 +----------------- .../daq/usi/SwitchTelnetClientSocket.java | 21 +- usi/src/main/java/daq/usi/UsiImpl.java | 63 +++--- .../daq/usi/allied/AlliedTelesisX230.java | 46 ++-- .../main/java/daq/usi/cisco/Cisco9300.java | 40 ++-- .../main/java/daq/usi/ovs/OpenVSwitch.java | 85 ++++++++ usi/src/main/proto/usi.proto | 14 +- ...est.java => BaseSwitchControllerTest.java} | 6 +- .../java/daq/usi/ovs/OpenVSwitchTest.java | 29 +++ usi/src/test/resources/ovs_output.txt | 24 +++ 12 files changed, 389 insertions(+), 306 deletions(-) create mode 100644 usi/src/main/java/daq/usi/BaseSwitchController.java create mode 100644 usi/src/main/java/daq/usi/ovs/OpenVSwitch.java rename usi/src/test/java/daq/usi/{SwitchControllerTest.java => BaseSwitchControllerTest.java} (85%) create mode 100644 usi/src/test/java/daq/usi/ovs/OpenVSwitchTest.java create mode 100644 usi/src/test/resources/ovs_output.txt diff --git a/usi/pom.xml b/usi/pom.xml index 7d87e5c53d..623acc4d95 100644 --- a/usi/pom.xml +++ b/usi/pom.xml @@ -59,7 +59,7 @@ org.junit.jupiter junit-jupiter - RELEASE + 5.6.2 compile diff --git a/usi/src/main/java/daq/usi/BaseSwitchController.java b/usi/src/main/java/daq/usi/BaseSwitchController.java new file mode 100644 index 0000000000..247930b298 --- /dev/null +++ b/usi/src/main/java/daq/usi/BaseSwitchController.java @@ -0,0 +1,169 @@ +package daq.usi; + +import java.util.HashMap; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + + +public abstract class BaseSwitchController implements SwitchController { + /** + * Terminal Prompt ends with '#' when enabled, '>' when not enabled. 
+ */ + public static final String CONSOLE_PROMPT_ENDING_ENABLED = "#"; + public static final String CONSOLE_PROMPT_ENDING_LOGIN = ">"; + public static final int TELNET_PORT = 23; + + // Define Common Variables Required for All Switch Interrogators + protected SwitchTelnetClientSocket telnetClientSocket; + protected Thread telnetClientSocketThread; + protected String remoteIpAddress; + protected boolean debug; + protected String username; + protected String password; + protected boolean userAuthorised = false; + protected boolean userEnabled = false; + protected String hostname = null; + protected boolean commandPending = false; + + public BaseSwitchController(String remoteIpAddress, String username, + String password) { + this(remoteIpAddress, username, password, false); + } + + /** + * Abstract Switch controller. Override this class for switch specific implementation + * + * @param remoteIpAddress switch ip address + * @param username switch username + * @param password switch password + * @param debug for verbose logging + */ + public BaseSwitchController( + String remoteIpAddress, String username, String password, boolean debug) { + this.remoteIpAddress = remoteIpAddress; + this.username = username; + this.password = password; + this.debug = debug; + telnetClientSocket = + new SwitchTelnetClientSocket(remoteIpAddress, TELNET_PORT, this, debug); + } + + /** + * Map a simple table containing a header and 1 row of data to a hashmap + * This method will also attempt to correct for mis-aligned tabular data as well as empty + * columns values. 
+ * + * @param rawPacket Raw table response from a switch command + * @param colNames Array containing the names of the columns in the response + * @param mapNames Array containing names key names to map values to + * @return A HashMap containing the values mapped to the key names provided in the mapNames array + */ + protected static HashMap mapSimpleTable( + String rawPacket, String[] colNames, String[] mapNames) { + HashMap colMap = new HashMap<>(); + String[] lines = rawPacket.split("\n"); + if (lines.length > 0) { + String header = lines[0].trim(); + String values = lines[1].trim(); + int lastSectionEnd = 0; + for (int i = 0; i < colNames.length; ++i) { + int secStart = lastSectionEnd; + int secEnd; + if ((i + 1) >= colNames.length) { + // Resolving last column + secEnd = values.length(); + } else { + // Tabular data is not always reported in perfectly alignment, we need to calculate the + // correct values based off of the sections in between white spaces + int firstWhiteSpace = + getFirstWhiteSpace(values.substring(lastSectionEnd)) + lastSectionEnd; + int lastWhiteSpace = + getIndexOfNonWhitespaceAfterWhitespace(values.substring(firstWhiteSpace)) + + firstWhiteSpace; + int nextHeaderStart = header.indexOf(colNames[i + 1]); + secEnd = Math.min(lastWhiteSpace, nextHeaderStart); + } + lastSectionEnd = secEnd; + String sectionRaw = values.substring(secStart, secEnd).trim(); + colMap.put(mapNames[i], sectionRaw); + } + } + return colMap; + } + + private static int getFirstWhiteSpace(String string) { + char[] characters = string.toCharArray(); + for (int i = 0; i < string.length(); i++) { + if (Character.isWhitespace(characters[i])) { + return i; + } + } + return -1; + } + + private static int getIndexOfNonWhitespaceAfterWhitespace(String string) { + char[] characters = string.toCharArray(); + boolean lastWhitespace = false; + for (int i = 0; i < string.length(); i++) { + if (Character.isWhitespace(characters[i])) { + lastWhitespace = true; + } else if 
(lastWhitespace) { + return i; + } + } + return -1; + } + + protected boolean containsPrompt(String consoleData) { + // Prompts usually hostname# or hostname(config)# + Pattern r = Pattern.compile(hostname + "\\s*(\\(.+\\))?" + CONSOLE_PROMPT_ENDING_ENABLED, 'g'); + Matcher m = r.matcher(consoleData); + return m.find(); + } + + protected boolean promptReady(String consoleData) { + // Prompts usually hostname# or hostname(config)# + Pattern r = Pattern.compile(hostname + "\\s*(\\(.+\\))?" + CONSOLE_PROMPT_ENDING_ENABLED + "$"); + Matcher m = r.matcher(consoleData); + return m.find(); + } + + /** + * Receive the raw data packet from the telnet connection and process accordingly. + * + * @param consoleData Most recent data read from the telnet socket buffer + */ + public void receiveData(String consoleData) { + if (debug) { + System.out.println( + java.time.LocalTime.now() + " receivedData:\t" + consoleData); + } + if (consoleData != null) { + try { + consoleData = consoleData.trim(); + if (!userAuthorised) { + handleLoginMessage(consoleData); + } else if (!userEnabled) { + handleEnableMessage(consoleData); + } else { + parseData(consoleData); + } + } catch (Exception e) { + telnetClientSocket.disposeConnection(); + e.printStackTrace(); + } + } + } + + protected abstract void parseData(String consoleData) throws Exception; + + protected abstract void handleLoginMessage(String consoleData) throws Exception; + + protected abstract void handleEnableMessage(String consoleData) throws Exception; + + @Override + public void start() { + telnetClientSocketThread = new Thread(telnetClientSocket); + telnetClientSocketThread.start(); + } +} diff --git a/usi/src/main/java/daq/usi/SwitchController.java b/usi/src/main/java/daq/usi/SwitchController.java index f9325c9ec0..82ae4ce663 100644 --- a/usi/src/main/java/daq/usi/SwitchController.java +++ b/usi/src/main/java/daq/usi/SwitchController.java @@ -1,201 +1,21 @@ package daq.usi; -/* - * Licensed to Google under one or more 
contributor license agreements. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import grpc.Interface; -import grpc.Power; +import grpc.InterfaceResponse; +import grpc.PowerResponse; import grpc.SwitchActionResponse; -import java.util.HashMap; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - - -public abstract class SwitchController implements Runnable { - /** - * Terminal Prompt ends with '#' when enabled, '>' when not enabled. - */ - public static final String CONSOLE_PROMPT_ENDING_ENABLED = "#"; - public static final String CONSOLE_PROMPT_ENDING_LOGIN = ">"; - - // Define Common Variables Required for All Switch Interrogators - protected SwitchTelnetClientSocket telnetClientSocket; - protected Thread telnetClientSocketThread; - protected String remoteIpAddress; - protected int telnetPort; - protected boolean debug; - protected String username; - protected String password; - protected boolean userAuthorised = false; - protected boolean userEnabled = false; - protected String hostname = null; - protected boolean commandPending = false; - - public SwitchController(String remoteIpAddress, int telnetPort, String username, - String password) { - this(remoteIpAddress, telnetPort, username, password, false); - } - - /** - * Abstract Switch controller. 
Override this class for switch specific implementation - * - * @param remoteIpAddress switch ip address - * @param telnetPort switch telnet port - * @param username switch username - * @param password switch password - * @param debug for verbose logging - */ - public SwitchController( - String remoteIpAddress, int telnetPort, String username, String password, boolean debug) { - this.remoteIpAddress = remoteIpAddress; - this.telnetPort = telnetPort; - this.username = username; - this.password = password; - this.debug = debug; - telnetClientSocket = - new SwitchTelnetClientSocket(remoteIpAddress, telnetPort, this, debug); - } - - protected boolean containsPrompt(String consoleData) { - // Prompts usually hostname# or hostname(config)# - Pattern r = Pattern.compile(hostname + "\\s*(\\(.+\\))?" + CONSOLE_PROMPT_ENDING_ENABLED, 'g'); - Matcher m = r.matcher(consoleData); - return m.find(); - } - - protected boolean promptReady(String consoleData) { - // Prompts usually hostname# or hostname(config)# - Pattern r = Pattern.compile(hostname + "\\s*(\\(.+\\))?" + CONSOLE_PROMPT_ENDING_ENABLED + "$"); - Matcher m = r.matcher(consoleData); - return m.find(); - } - - /** - * Receive the raw data packet from the telnet connection and process accordingly. 
- * - * @param consoleData Most recent data read from the telnet socket buffer - */ - public void receiveData(String consoleData) { - if (debug) { - System.out.println( - java.time.LocalTime.now() + " receivedData:\t" + consoleData); - } - if (consoleData != null) { - try { - consoleData = consoleData.trim(); - if (!userAuthorised) { - handleLoginMessage(consoleData); - } else if (!userEnabled) { - handleEnableMessage(consoleData); - } else { - parseData(consoleData); - } - } catch (Exception e) { - telnetClientSocket.disposeConnection(); - e.printStackTrace(); - } - } - } - - /** - * Map a simple table containing a header and 1 row of data to a hashmap - * This method will also attempt to correct for mis-aligned tabular data as well as empty - * columns values. - * - * @param rawPacket Raw table response from a switch command - * @param colNames Array containing the names of the columns in the response - * @param mapNames Array containing names key names to map values to - * @return A HashMap containing the values mapped to the key names provided in the mapNames array - */ - protected static HashMap mapSimpleTable( - String rawPacket, String[] colNames, String[] mapNames) { - HashMap colMap = new HashMap<>(); - String[] lines = rawPacket.split("\n"); - if (lines.length > 0) { - String header = lines[0].trim(); - String values = lines[1].trim(); - int lastSectionEnd = 0; - for (int i = 0; i < colNames.length; ++i) { - int secStart = lastSectionEnd; - int secEnd; - if ((i + 1) >= colNames.length) { - // Resolving last column - secEnd = values.length(); - } else { - // Tabular data is not always reported in perfectly alignment, we need to calculate the - // correct values based off of the sections in between white spaces - int firstWhiteSpace = - getFirstWhiteSpace(values.substring(lastSectionEnd)) + lastSectionEnd; - int lastWhiteSpace = - getIndexOfNonWhitespaceAfterWhitespace(values.substring(firstWhiteSpace)) - + firstWhiteSpace; - int nextHeaderStart = 
header.indexOf(colNames[i + 1]); - secEnd = Math.min(lastWhiteSpace, nextHeaderStart); - } - lastSectionEnd = secEnd; - String sectionRaw = values.substring(secStart, secEnd).trim(); - colMap.put(mapNames[i], sectionRaw); - } - } - return colMap; - } - - - private static int getFirstWhiteSpace(String string) { - char[] characters = string.toCharArray(); - for (int i = 0; i < string.length(); i++) { - if (Character.isWhitespace(characters[i])) { - return i; - } - } - return -1; - } - - private static int getIndexOfNonWhitespaceAfterWhitespace(String string) { - char[] characters = string.toCharArray(); - boolean lastWhitespace = false; - for (int i = 0; i < string.length(); i++) { - if (Character.isWhitespace(characters[i])) { - lastWhitespace = true; - } else if (lastWhitespace) { - return i; - } - } - return -1; - } - - protected abstract void parseData(String consoleData) throws Exception; - - protected abstract void handleLoginMessage(String consoleData) throws Exception; - protected abstract void handleEnableMessage(String consoleData) throws Exception; +public interface SwitchController { - public abstract void getPower(int devicePort, ResponseHandler handler) throws Exception; + void getPower(int devicePort, ResponseHandler handler) throws Exception; - public abstract void getInterface(int devicePort, ResponseHandler handler) + void getInterface(int devicePort, ResponseHandler handler) throws Exception; - public abstract void connect(int devicePort, ResponseHandler handler) + void connect(int devicePort, ResponseHandler handler) throws Exception; - public abstract void disconnect(int devicePort, ResponseHandler handler) + void disconnect(int devicePort, ResponseHandler handler) throws Exception; - @Override - public void run() { - telnetClientSocketThread = new Thread(telnetClientSocket); - telnetClientSocketThread.start(); - } + void start(); } diff --git a/usi/src/main/java/daq/usi/SwitchTelnetClientSocket.java 
b/usi/src/main/java/daq/usi/SwitchTelnetClientSocket.java index 7a73380612..a8349ff8a1 100644 --- a/usi/src/main/java/daq/usi/SwitchTelnetClientSocket.java +++ b/usi/src/main/java/daq/usi/SwitchTelnetClientSocket.java @@ -1,22 +1,5 @@ package daq.usi; -/* - * Licensed to Google under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -39,7 +22,7 @@ public class SwitchTelnetClientSocket implements TelnetNotificationHandler, Runn protected static final int MAX_EMPTY_WAIT_COUNT = 70; protected TelnetClient telnetClient; - protected SwitchController interrogator; + protected BaseSwitchController interrogator; protected String remoteIpAddress = ""; protected int remotePort = 23; @@ -62,7 +45,7 @@ public class SwitchTelnetClientSocket implements TelnetNotificationHandler, Runn * @param debug For more verbose output. 
*/ public SwitchTelnetClientSocket( - String remoteIpAddress, int remotePort, SwitchController interrogator, boolean debug) { + String remoteIpAddress, int remotePort, BaseSwitchController interrogator, boolean debug) { this.remoteIpAddress = remoteIpAddress; this.remotePort = remotePort; this.interrogator = interrogator; diff --git a/usi/src/main/java/daq/usi/UsiImpl.java b/usi/src/main/java/daq/usi/UsiImpl.java index 41dfea419f..6cda498025 100644 --- a/usi/src/main/java/daq/usi/UsiImpl.java +++ b/usi/src/main/java/daq/usi/UsiImpl.java @@ -2,8 +2,9 @@ import daq.usi.allied.AlliedTelesisX230; import daq.usi.cisco.Cisco9300; -import grpc.Interface; -import grpc.Power; +import daq.usi.ovs.OpenVSwitch; +import grpc.InterfaceResponse; +import grpc.PowerResponse; import grpc.SwitchActionResponse; import grpc.SwitchInfo; import grpc.USIServiceGrpc; @@ -12,41 +13,48 @@ import java.util.Map; public class UsiImpl extends USIServiceGrpc.USIServiceImplBase { - private Map switchControllers; + private final Map switchControllers; public UsiImpl() { super(); switchControllers = new HashMap<>(); } - private SwitchController getSwitchController(SwitchInfo switchInfo) { - String repr = String.join(",", switchInfo.getModel().toString(), switchInfo.getIpAddr(), - String.valueOf(switchInfo.getTelnetPort()), switchInfo.getUsername(), - switchInfo.getPassword()); - SwitchController sc = switchControllers.get(repr); - if (sc == null) { - switch (switchInfo.getModel()) { - case ALLIED_TELESIS_X230: { - sc = new AlliedTelesisX230(switchInfo.getIpAddr(), switchInfo.getTelnetPort(), - switchInfo.getUsername(), switchInfo.getPassword()); - break; - } - case CISCO_9300: { - sc = new Cisco9300(switchInfo.getIpAddr(), switchInfo.getTelnetPort(), - switchInfo.getUsername(), switchInfo.getPassword()); - break; - } - default: - break; + private SwitchController createController(SwitchInfo switchInfo) { + SwitchController newController; + switch (switchInfo.getModel()) { + case ALLIED_TELESIS_X230: 
{ + newController = + new AlliedTelesisX230(switchInfo.getIpAddr(), switchInfo.getUsername(), + switchInfo.getPassword()); + break; + } + case CISCO_9300: { + newController = new Cisco9300(switchInfo.getIpAddr(), switchInfo.getUsername(), + switchInfo.getPassword()); + break; + } + case OVS_SWITCH: { + newController = new OpenVSwitch(); + break; } - new Thread(sc).start(); - switchControllers.put(repr, sc); + default: + throw new IllegalArgumentException("Unrecognized switch model " + + switchInfo.getModel()); } - return sc; + newController.start(); + return newController; + } + + private SwitchController getSwitchController(SwitchInfo switchInfo) { + String repr = String.join(",", switchInfo.getModel().toString(), + switchInfo.getIpAddr(), switchInfo.getUsername(), + switchInfo.getPassword()); + return switchControllers.computeIfAbsent(repr, key -> createController(switchInfo)); } @Override - public void getPower(SwitchInfo request, StreamObserver responseObserver) { + public void getPower(SwitchInfo request, StreamObserver responseObserver) { SwitchController sc = getSwitchController(request); try { sc.getPower(request.getDevicePort(), responseObserver::onNext); @@ -57,7 +65,8 @@ public void getPower(SwitchInfo request, StreamObserver responseObserver) } @Override - public void getInterface(SwitchInfo request, StreamObserver responseObserver) { + public void getInterface(SwitchInfo request, + StreamObserver responseObserver) { SwitchController sc = getSwitchController(request); try { sc.getInterface(request.getDevicePort(), responseObserver::onNext); diff --git a/usi/src/main/java/daq/usi/allied/AlliedTelesisX230.java b/usi/src/main/java/daq/usi/allied/AlliedTelesisX230.java index f45b518542..50007a2086 100644 --- a/usi/src/main/java/daq/usi/allied/AlliedTelesisX230.java +++ b/usi/src/main/java/daq/usi/allied/AlliedTelesisX230.java @@ -1,27 +1,12 @@ package daq.usi.allied; -/* - * Licensed to the Google under one or more contributor license agreements. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - +import daq.usi.BaseSwitchController; import daq.usi.ResponseHandler; -import daq.usi.SwitchController; -import grpc.Interface; +import grpc.InterfaceResponse; import grpc.LinkStatus; import grpc.POEStatus; import grpc.POESupport; -import grpc.Power; +import grpc.PowerResponse; import grpc.SwitchActionResponse; import java.util.Arrays; import java.util.HashMap; @@ -33,7 +18,7 @@ import java.util.stream.Collectors; -public class AlliedTelesisX230 extends SwitchController { +public class AlliedTelesisX230 extends BaseSwitchController { private static final String[] powerExpected = {"dev_interface", "admin", "pri", "oper", "power", "device", "dev_class", "max"}; private static final String[] showPowerExpected = @@ -55,33 +40,29 @@ public class AlliedTelesisX230 extends SwitchController { * ATX230 Switch Controller. * * @param remoteIpAddress switch ip address - * @param telnetPort switch telnet port * @param user switch username * @param password switch password */ public AlliedTelesisX230( String remoteIpAddress, - int telnetPort, String user, String password) { - this(remoteIpAddress, telnetPort, user, password, false); + this(remoteIpAddress, user, password, false); } /** * ATX230 Switch Controller. 
* * @param remoteIpAddress switch ip address - * @param telnetPort switch telnet port * @param user switch username * @param password switch password - * @param debug for verbose output + * @param debug for verbose output */ public AlliedTelesisX230( String remoteIpAddress, - int telnetPort, String user, String password, boolean debug) { - super(remoteIpAddress, telnetPort, user, password, debug); + super(remoteIpAddress, user, password, debug); this.username = user == null ? "manager" : user; this.password = password == null ? "friend" : password; } @@ -126,7 +107,7 @@ private String[] portManagementCommand(int interfacePort, boolean enabled) { @Override - public void getPower(int devicePort, ResponseHandler handler) throws Exception { + public void getPower(int devicePort, ResponseHandler handler) throws Exception { while (commandPending) { Thread.sleep(WAIT_MS); } @@ -145,7 +126,8 @@ public void getPower(int devicePort, ResponseHandler handler) throws Exce } @Override - public void getInterface(int devicePort, ResponseHandler handler) throws Exception { + public void getInterface(int devicePort, ResponseHandler handler) + throws Exception { while (commandPending) { Thread.sleep(WAIT_MS); } @@ -199,8 +181,8 @@ public void disconnect(int devicePort, ResponseHandler han managePort(devicePort, handler, false); } - private Interface buildInterfaceResponse(Map interfaceMap) { - Interface.Builder response = Interface.newBuilder(); + private InterfaceResponse buildInterfaceResponse(Map interfaceMap) { + InterfaceResponse.Builder response = InterfaceResponse.newBuilder(); String duplex = interfaceMap.getOrDefault("duplex", ""); int speed = 0; try { @@ -215,8 +197,8 @@ private Interface buildInterfaceResponse(Map interfaceMap) { .build(); } - private Power buildPowerResponse(Map powerMap) { - Power.Builder response = Power.newBuilder(); + private PowerResponse buildPowerResponse(Map powerMap) { + PowerResponse.Builder response = PowerResponse.newBuilder(); float maxPower 
= 0; float currentPower = 0; try { diff --git a/usi/src/main/java/daq/usi/cisco/Cisco9300.java b/usi/src/main/java/daq/usi/cisco/Cisco9300.java index 83f1d19550..1dd3683e6c 100644 --- a/usi/src/main/java/daq/usi/cisco/Cisco9300.java +++ b/usi/src/main/java/daq/usi/cisco/Cisco9300.java @@ -1,27 +1,12 @@ package daq.usi.cisco; -/* - * Licensed to Google under one or more contributor license agreements. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - +import daq.usi.BaseSwitchController; import daq.usi.ResponseHandler; -import daq.usi.SwitchController; -import grpc.Interface; +import grpc.InterfaceResponse; import grpc.LinkStatus; import grpc.POEStatus; import grpc.POESupport; -import grpc.Power; +import grpc.PowerResponse; import grpc.SwitchActionResponse; import java.util.Arrays; import java.util.HashMap; @@ -31,7 +16,7 @@ import java.util.stream.Collectors; -public class Cisco9300 extends SwitchController { +public class Cisco9300 extends BaseSwitchController { private static final String[] interfaceExpected = {"interface", "name", "status", "vlan", "duplex", "speed", "type"}; @@ -55,16 +40,14 @@ public class Cisco9300 extends SwitchController { * Cisco 9300 Switch Controller. 
* * @param remoteIpAddress switch ip - * @param telnetPort switch telnet port * @param user switch username * @param password switch password */ public Cisco9300( String remoteIpAddress, - int telnetPort, String user, String password) { - super(remoteIpAddress, telnetPort, user, password); + super(remoteIpAddress, user, password); this.username = user == null ? "admin" : user; this.password = password == null ? "password" : password; } @@ -156,7 +139,7 @@ public void parseData(String consoleData) throws Exception { } @Override - public void getPower(int devicePort, ResponseHandler powerResponseHandler) + public void getPower(int devicePort, ResponseHandler powerResponseHandler) throws Exception { while (commandPending) { Thread.sleep(WAIT_MS); @@ -176,7 +159,8 @@ public void getPower(int devicePort, ResponseHandler powerResponseHandler } @Override - public void getInterface(int devicePort, ResponseHandler handler) throws Exception { + public void getInterface(int devicePort, ResponseHandler handler) + throws Exception { while (commandPending) { Thread.sleep(WAIT_MS); } @@ -230,8 +214,8 @@ public void disconnect(int devicePort, ResponseHandler han managePort(devicePort, handler, false); } - private Interface buildInterfaceResponse(Map interfaceMap) { - Interface.Builder response = Interface.newBuilder(); + private InterfaceResponse buildInterfaceResponse(Map interfaceMap) { + InterfaceResponse.Builder response = InterfaceResponse.newBuilder(); String duplex = interfaceMap.getOrDefault("duplex", ""); if (duplex.startsWith("a-")) { // Interface in Auto Duplex duplex = duplex.replaceFirst("a-", ""); @@ -249,8 +233,8 @@ private Interface buildInterfaceResponse(Map interfaceMap) { .build(); } - private Power buildPowerResponse(Map powerMap) { - Power.Builder response = Power.newBuilder(); + private PowerResponse buildPowerResponse(Map powerMap) { + PowerResponse.Builder response = PowerResponse.newBuilder(); float maxPower = Float.parseFloat(powerMap.get("max")); float 
currentPower = Float.parseFloat(powerMap.get("power")); diff --git a/usi/src/main/java/daq/usi/ovs/OpenVSwitch.java b/usi/src/main/java/daq/usi/ovs/OpenVSwitch.java new file mode 100644 index 0000000000..691a04eeb5 --- /dev/null +++ b/usi/src/main/java/daq/usi/ovs/OpenVSwitch.java @@ -0,0 +1,85 @@ +package daq.usi.ovs; + +import daq.usi.ResponseHandler; +import daq.usi.SwitchController; +import grpc.InterfaceResponse; +import grpc.LinkStatus; +import grpc.POEStatus; +import grpc.POESupport; +import grpc.PowerResponse; +import grpc.SwitchActionResponse; +import java.io.BufferedReader; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.net.URL; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class OpenVSwitch implements SwitchController { + + private static final String OVS_OUTPUT_FILE = "ovs_output.txt"; + + protected String getInterfaceByPort(int devicePort) throws FileNotFoundException { + URL file = OpenVSwitch.class.getClassLoader().getResource(OVS_OUTPUT_FILE); + if (file == null) { + throw new FileNotFoundException(OVS_OUTPUT_FILE + " is not found!"); + } + FileReader reader = new FileReader(file.getFile()); + BufferedReader bufferedReader = new BufferedReader(reader); + Pattern pattern = Pattern.compile("(^\\s*" + devicePort + ")(\\((.+)\\))(:.*)", 'g'); + String interfaceLine = bufferedReader.lines().filter(line -> { + Matcher m = pattern.matcher(line); + return m.find(); + }).findFirst().get(); + + Matcher m = pattern.matcher(interfaceLine); + m.matches(); + return m.group(3); + } + + @Override + public void getPower(int devicePort, ResponseHandler handler) throws Exception { + PowerResponse.Builder response = PowerResponse.newBuilder(); + PowerResponse power = response.setPoeStatus(POEStatus.OFF) + .setPoeSupport(POESupport.DISABLED) + .setMaxPowerConsumption(0) + .setCurrentPowerConsumption(0).build(); + handler.receiveData(power); + } + + @Override + public void getInterface(int devicePort, 
ResponseHandler handler) + throws Exception { + InterfaceResponse.Builder response = InterfaceResponse.newBuilder(); + InterfaceResponse iface = response.setLinkStatus(LinkStatus.UP) + .setDuplex("") + .setLinkSpeed(0) + .build(); + handler.receiveData(iface); + } + + private void managePort(int devicePort, ResponseHandler handler, + boolean enabled) throws Exception { + String iface = getInterfaceByPort(devicePort); + ProcessBuilder processBuilder = new ProcessBuilder(); + processBuilder.command("bash", "-c", "ifconfig " + iface + (enabled ? " up" : " down")); + Process process = processBuilder.start(); + int exitCode = process.waitFor(); + handler.receiveData(SwitchActionResponse.newBuilder().setSuccess(exitCode == 0).build()); + } + + @Override + public void connect(int devicePort, ResponseHandler handler) + throws Exception { + managePort(devicePort, handler, true); + } + + @Override + public void disconnect(int devicePort, ResponseHandler handler) + throws Exception { + managePort(devicePort, handler, false); + } + + public void start() { + } +} diff --git a/usi/src/main/proto/usi.proto b/usi/src/main/proto/usi.proto index a85db75fbb..6107b0afc7 100644 --- a/usi/src/main/proto/usi.proto +++ b/usi/src/main/proto/usi.proto @@ -9,8 +9,8 @@ option java_outer_classname = "USIProto"; option java_package = "grpc"; service USIService { - rpc GetPower(SwitchInfo) returns (Power) {} - rpc GetInterface(SwitchInfo) returns (Interface) {} + rpc GetPower(SwitchInfo) returns (PowerResponse) {} + rpc GetInterface(SwitchInfo) returns (InterfaceResponse) {} rpc disconnect(SwitchInfo) returns (SwitchActionResponse) {} rpc connect(SwitchInfo) returns (SwitchActionResponse) {} } @@ -19,14 +19,14 @@ message SwitchActionResponse { bool success = 1; } -message Power { +message PowerResponse { float current_power_consumption = 1; float max_power_consumption = 2; POESupport poe_support = 3; POEStatus poe_status = 4; } -message Interface { +message InterfaceResponse { LinkStatus 
link_status = 1; int32 link_speed = 2; string duplex = 3; @@ -35,6 +35,7 @@ message Interface { enum SwitchModel { ALLIED_TELESIS_X230 = 0; CISCO_9300 = 1; + OVS_SWITCH = 2; } enum LinkStatus { @@ -62,9 +63,6 @@ message SwitchInfo { // IP address of external switch. string ip_addr = 1; - // Telnet Port - int32 telnet_port = 2; - // Device Port int32 device_port = 3; @@ -76,4 +74,4 @@ message SwitchInfo { // Switch connect password string password = 6; -} +} \ No newline at end of file diff --git a/usi/src/test/java/daq/usi/SwitchControllerTest.java b/usi/src/test/java/daq/usi/BaseSwitchControllerTest.java similarity index 85% rename from usi/src/test/java/daq/usi/SwitchControllerTest.java rename to usi/src/test/java/daq/usi/BaseSwitchControllerTest.java index 656f7e0c5e..f2867c1b65 100644 --- a/usi/src/test/java/daq/usi/SwitchControllerTest.java +++ b/usi/src/test/java/daq/usi/BaseSwitchControllerTest.java @@ -8,7 +8,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -class SwitchControllerTest { +class BaseSwitchControllerTest { @BeforeEach void setUp() { @@ -23,7 +23,7 @@ void mapSimpleTableEmptyInput() { String raw = ""; String[] colNames = {"a", "b"}; String[] mapNames = {"a", "b"}; - Map response = SwitchController.mapSimpleTable(raw, colNames, mapNames); + Map response = BaseSwitchController.mapSimpleTable(raw, colNames, mapNames); for (String key : response.keySet()) { assertNull(response.get(key)); } @@ -38,7 +38,7 @@ void mapSimpleTableSampleInputAT() { Map expected = Map.of("interface", "port1.0.1", "admin", "Enabled", "pri", "Low", "oper", "Powered", "power", "3337", "device", "n/a", "class", "0", "max", "15400 [C]"); - Map response = SwitchController.mapSimpleTable(raw, colNames, mapNames); + Map response = BaseSwitchController.mapSimpleTable(raw, colNames, mapNames); for (String key : response.keySet()) { assertEquals(response.get(key), expected.get(key)); } diff --git a/usi/src/test/java/daq/usi/ovs/OpenVSwitchTest.java 
b/usi/src/test/java/daq/usi/ovs/OpenVSwitchTest.java new file mode 100644 index 0000000000..fb951cf29f --- /dev/null +++ b/usi/src/test/java/daq/usi/ovs/OpenVSwitchTest.java @@ -0,0 +1,29 @@ +package daq.usi.ovs; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import java.io.FileNotFoundException; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +class OpenVSwitchTest { + OpenVSwitch ovs; + + @BeforeEach + void setUp() { + ovs = new OpenVSwitch(); + } + + @AfterEach + void tearDown() { + } + + @Test + void getInterfaceByPort() throws FileNotFoundException { + assertEquals(ovs.getInterfaceByPort(1), "faux"); + assertEquals(ovs.getInterfaceByPort(2), "faux-2"); + assertEquals(ovs.getInterfaceByPort(7), "sec-eth7"); + } + +} \ No newline at end of file diff --git a/usi/src/test/resources/ovs_output.txt b/usi/src/test/resources/ovs_output.txt new file mode 100644 index 0000000000..9621772035 --- /dev/null +++ b/usi/src/test/resources/ovs_output.txt @@ -0,0 +1,24 @@ +OFPT_FEATURES_REPLY (xid=0x2): dpid:0000000000000002 +n_tables:254, n_buffers:0 +capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS ARP_MATCH_IP +actions: output enqueue set_vlan_vid set_vlan_pcp strip_vlan mod_dl_src mod_dl_dst mod_nw_src mod_nw_dst mod_nw_tos mod_tp_src mod_tp_dst + 1(faux): addr:de:06:c6:06:73:bb + config: 0 + state: 0 + current: 10GB-FD COPPER + speed: 10000 Mbps now, 0 Mbps max + 2(faux-2): addr:de:06:c6:06:73:bc + config: 0 + state: 0 + current: 10GB-FD COPPER + speed: 10000 Mbps now, 0 Mbps max + 7(sec-eth7): addr:a2:f2:6f:01:84:d4 + config: 0 + state: 0 + current: 10GB-FD COPPER + speed: 10000 Mbps now, 0 Mbps max + LOCAL(sec): addr:72:87:94:b5:9c:48 + config: PORT_DOWN + state: LINK_DOWN + speed: 0 Mbps now, 0 Mbps max +OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0 From 79768e490d42939f1629cef72545f10516b3e83e Mon Sep 17 00:00:00 2001 From: frgitdaq 
<62390501+frgitdaq@users.noreply.github.com> Date: Wed, 8 Jul 2020 18:05:03 +0100 Subject: [PATCH 16/38] NTPv4 support (#487) * Added NTPv4 test 25 --- config/modules/all.conf | 3 +- docker/include/bin/start_faux | 12 ++- docs/device_report.md | 23 +++++- .../module_config.json | 3 + .../distech_ecy-s1000/module_config.json | 3 + resources/setups/baseline/module_config.json | 3 + .../qualification/device_module_config.json | 3 + .../device_type_module_config.json | 3 + .../qualification/system_module_config.json | 3 + .../remediation/device_module_config.json | 3 + .../remediation/system_module_config.json | 3 + resources/test_site/module_config.json | 3 + subset/ntp/Dockerfile.test_ntp | 10 +++ subset/ntp/README.md | 17 +++++ subset/ntp/build.conf | 2 + subset/ntp/ntp_tests.py | 74 +++++++++++++++++++ subset/ntp/test_ntp | 9 +++ testing/test_aux.out | 9 +++ testing/test_aux.sh | 5 +- 19 files changed, 184 insertions(+), 7 deletions(-) create mode 100644 subset/ntp/Dockerfile.test_ntp create mode 100644 subset/ntp/README.md create mode 100644 subset/ntp/build.conf create mode 100644 subset/ntp/ntp_tests.py create mode 100755 subset/ntp/test_ntp diff --git a/config/modules/all.conf b/config/modules/all.conf index 4d897ae7c5..59b4b74367 100644 --- a/config/modules/all.conf +++ b/config/modules/all.conf @@ -7,4 +7,5 @@ include subset/connection/build.conf include subset/bacnet/build.conf include subset/security/build.conf include subset/cloud/build.conf -include subset/manual/build.conf \ No newline at end of file +include subset/manual/build.conf +include subset/ntp/build.conf diff --git a/docker/include/bin/start_faux b/docker/include/bin/start_faux index 94c9711ab3..15c85bbf4e 100755 --- a/docker/include/bin/start_faux +++ b/docker/include/bin/start_faux @@ -136,9 +136,15 @@ elif [ -n "${options[bacnetfail]}" ]; then FauxDeviceEngine.EntryPoint $local_ip $broadcast_ip "Faux-Device-Fail.json" & fi -if [ -n "${options[ntp_client]}" ]; then - echo Starting ntp client. 
- java -jar NTPClient/build/libs/NTPClient-1.0-SNAPSHOT.jar "time.google.com" "123" "3" & +# Queries the NTP server learnt from DHCP. +if [ -n "${options[ntpv4]}" ]; then + dhcp_ntp=$(fgrep NTPSERVERS= /run/ntpdate.dhcp) + ntp_server=`echo $dhcp_ntp | cut -d "'" -f 2` + echo Transmitting NTP query to $ntp_server using NTPv4 + ntpdate -q -o 4 $ntp_server & +elif [ -n "${options[ntpv3]}" ]; then + echo Transmitting NTP query to time.google.com using NTPv3 + ntpdate -q -o 3 time.google.com & fi # ntp_pass queries the NTP server learnt from DHCP. ntp_fail sends to time.google.com diff --git a/docs/device_report.md b/docs/device_report.md index 2ab3ab3320..b22465300b 100644 --- a/docs/device_report.md +++ b/docs/device_report.md @@ -56,7 +56,7 @@ Overall device result FAIL |---|---|---|---|---| |Required|1|0|0|0| |Recommended|2|0|0|0| -|Other|1|2|22|2| +|Other|2|2|22|2| |Result|Test|Category|Expectation|Notes| |---|---|---|---|---| @@ -67,6 +67,7 @@ Overall device result FAIL |skip|cloud.udmi.state|Other|Other|No device id| |skip|cloud.udmi.system|Other|Other|No device id| |fail|connection.mac_oui|Other|Other|Manufacturer prefix not found!| +|pass|connection.network.ntp_support|Other|Other|Using NTPv4.| |skip|connection.port_duplex|Other|Other|No local IP has been set, check system config| |skip|connection.port_link|Other|Other|No local IP has been set, check system config| |skip|connection.port_speed|Other|Other|No local IP has been set, check system config| @@ -567,5 +568,25 @@ RESULT pass manual.test.travis Manual test - for testing |---|---| |enabled|True| +## Module ntp + + +#### Report + +``` +-------------------- +connection.network.ntp_support +-------------------- +Device supports NTP version 4. +-------------------- +RESULT pass connection.network.ntp_support Using NTPv4. 
+``` + +#### Module Config + +|Attribute|Value| +|---|---| +|enabled|True| + ## Report complete diff --git a/resources/device_types/deltacontrols_o3-din-cpu/module_config.json b/resources/device_types/deltacontrols_o3-din-cpu/module_config.json index 066bf35abb..a17a5a6997 100644 --- a/resources/device_types/deltacontrols_o3-din-cpu/module_config.json +++ b/resources/device_types/deltacontrols_o3-din-cpu/module_config.json @@ -24,6 +24,9 @@ "nmap": { "enabled": true }, + "ntp": { + "enabled": true + }, "switch": { "enabled": true, "poe": { diff --git a/resources/device_types/distech_ecy-s1000/module_config.json b/resources/device_types/distech_ecy-s1000/module_config.json index d790899c5c..b9a1847754 100644 --- a/resources/device_types/distech_ecy-s1000/module_config.json +++ b/resources/device_types/distech_ecy-s1000/module_config.json @@ -24,6 +24,9 @@ "nmap": { "enabled": true }, + "ntp": { + "enabled": true + }, "switch": { "enabled": true, "poe": { diff --git a/resources/setups/baseline/module_config.json b/resources/setups/baseline/module_config.json index 8ebd56cc57..ea0f335568 100644 --- a/resources/setups/baseline/module_config.json +++ b/resources/setups/baseline/module_config.json @@ -32,6 +32,9 @@ }, "network": { "enabled": true + }, + "ntp": { + "enabled": true } } } diff --git a/resources/setups/qualification/device_module_config.json b/resources/setups/qualification/device_module_config.json index 6019735d2b..9713c5723b 100644 --- a/resources/setups/qualification/device_module_config.json +++ b/resources/setups/qualification/device_module_config.json @@ -39,6 +39,9 @@ "nmap": { "enabled": true }, + "ntp": { + "enabled": true + }, "switch": { "enabled": true, "poe": { diff --git a/resources/setups/qualification/device_type_module_config.json b/resources/setups/qualification/device_type_module_config.json index f183d5527c..9d3181460b 100644 --- a/resources/setups/qualification/device_type_module_config.json +++ 
b/resources/setups/qualification/device_type_module_config.json @@ -33,6 +33,9 @@ "nmap": { "enabled": true }, + "ntp": { + "enabled": true + }, "switch": { "enabled": true, "poe": { diff --git a/resources/setups/qualification/system_module_config.json b/resources/setups/qualification/system_module_config.json index de3cc56461..8b1926a3f1 100644 --- a/resources/setups/qualification/system_module_config.json +++ b/resources/setups/qualification/system_module_config.json @@ -24,6 +24,9 @@ "nmap": { "enabled": true }, + "ntp": { + "enabled": true + }, "switch": { "enabled": true, "poe": { diff --git a/resources/setups/remediation/device_module_config.json b/resources/setups/remediation/device_module_config.json index 976761c762..65223dac27 100644 --- a/resources/setups/remediation/device_module_config.json +++ b/resources/setups/remediation/device_module_config.json @@ -35,6 +35,9 @@ "nmap": { "enabled": true }, + "ntp": { + "enabled": true + }, "switch": { "enabled": true, "poe": { diff --git a/resources/setups/remediation/system_module_config.json b/resources/setups/remediation/system_module_config.json index 17e7793758..754145aa9e 100644 --- a/resources/setups/remediation/system_module_config.json +++ b/resources/setups/remediation/system_module_config.json @@ -24,6 +24,9 @@ "nmap": { "enabled": true }, + "ntp": { + "enabled": true + }, "switch": { "enabled": true, "poe": { diff --git a/resources/test_site/module_config.json b/resources/test_site/module_config.json index 4d672b67aa..3ff4030a4c 100644 --- a/resources/test_site/module_config.json +++ b/resources/test_site/module_config.json @@ -17,6 +17,9 @@ }, "manual": { "enabled": true + }, + "ntp": { + "enabled": true } }, "process": { diff --git a/subset/ntp/Dockerfile.test_ntp b/subset/ntp/Dockerfile.test_ntp new file mode 100644 index 0000000000..2bfedf5a18 --- /dev/null +++ b/subset/ntp/Dockerfile.test_ntp @@ -0,0 +1,10 @@ +FROM daqf/aardvark:latest + +RUN $AG update && $AG install python python-setuptools 
python-pip netcat + +RUN pip install scapy + +COPY subset/ntp/ntp_tests.py . +COPY subset/ntp/test_ntp . + +CMD ["./test_ntp"] diff --git a/subset/ntp/README.md b/subset/ntp/README.md new file mode 100644 index 0000000000..b6b4bc3a8e --- /dev/null +++ b/subset/ntp/README.md @@ -0,0 +1,17 @@ +# NTP testing + +## test_ntp +The NTP test inspects client NTP support and version. + +### Note for test developers +The functional test code is included in the `ntp_tests.py` file. + +The test reads packets from startup.pcap. + +If the python code needs debugging, the pip module `scapy` is required (`pip install scapy`). + +### NTP Test conditions +| Test ID | Info | Pass | Fail | Skip | +|---|---|---|---|---| +| connection.network.ntp_support | Are the received NTP packets using NTP v4? | NTP version is 4 | NTP version is not 4 | No NTP packets are received | + diff --git a/subset/ntp/build.conf b/subset/ntp/build.conf new file mode 100644 index 0000000000..febb370b8f --- /dev/null +++ b/subset/ntp/build.conf @@ -0,0 +1,2 @@ +build subset/ntp +add ntp diff --git a/subset/ntp/ntp_tests.py b/subset/ntp/ntp_tests.py new file mode 100644 index 0000000000..2a774f1124 --- /dev/null +++ b/subset/ntp/ntp_tests.py @@ -0,0 +1,74 @@ +from __future__ import absolute_import +import sys +from scapy.all import NTP, rdpcap + +arguments = sys.argv + +test_request = str(arguments[1]) +cap_pcap_file = str(arguments[2]) + +report_filename = 'report.txt' +ignore = '%%' +summary_text = '' +result = 'fail' +dash_break_line = '--------------------\n' +description_ntp_support = 'Device supports NTP version 4.' 
+ + +def write_report(string_to_append): + with open(report_filename, 'a+') as file_open: + file_open.write(string_to_append) + + +# Extracts the NTP version from the first client NTP packet +def ntp_client_version(capture): + client_packets = ntp_packets(capture, 3) + if len(client_packets) == 0: + return None + return client_packets[0].version + + +# Filters the packets by type (NTP) +def ntp_packets(capture, mode=None): + packets = [] + for packet in capture: + if NTP in packet: + ip = packet.payload + udp = ip.payload + ntp = udp.payload + if mode is None or mode == ntp.mode: + packets.append(ntp) + return packets + + +def test_ntp_support(): + capture = rdpcap(cap_pcap_file) + if len(capture) > 0: + version = ntp_client_version(capture) + if version is None: + add_summary("No NTP packets received.") + return 'skip' + if version == 4: + add_summary("Using NTPv4.") + return 'pass' + else: + add_summary("Not using NTPv4.") + return 'fail' + else: + add_summary("No NTP packets received.") + return 'skip' + + +def add_summary(text): + global summary_text + summary_text = summary_text + " " + text if summary_text else text + + +write_report("{b}{t}\n{b}".format(b=dash_break_line, t=test_request)) + + +if test_request == 'connection.network.ntp_support': + write_report("{d}\n{b}".format(b=dash_break_line, d=description_ntp_support)) + result = test_ntp_support() + +write_report("RESULT {r} {t} {s}\n".format(r=result, t=test_request, s=summary_text.strip())) diff --git a/subset/ntp/test_ntp b/subset/ntp/test_ntp new file mode 100755 index 0000000000..7521f9d74d --- /dev/null +++ b/subset/ntp/test_ntp @@ -0,0 +1,9 @@ +#!/bin/bash -e + +REPORT=/tmp/report.txt + +STARTUP=/scans/startup.pcap + +python ntp_tests.py connection.network.ntp_support $STARTUP + +cat report.txt >> $REPORT diff --git a/testing/test_aux.out b/testing/test_aux.out index acf12f7aa7..bed2e7dec1 100644 --- a/testing/test_aux.out +++ b/testing/test_aux.out @@ -57,6 +57,9 @@ RESULT pass 
security.passwords.telnet Default passwords have been changed. RESULT pass security.passwords.ssh Default passwords have been changed. RESULT skip security.firmware Could not retrieve a firmware version with nmap. Check bacnet port. RESULT pass security.firmware version found: ?\xFF\xFF\x19,>u\x08\x00no +RESULT pass connection.network.ntp_support Using NTPv4. +RESULT fail connection.network.ntp_support Not using NTPv4. +RESULT skip connection.network.ntp_support No NTP packets received. dhcp requests 1 1 0 1 01: [] 02: ['02:macoui:TimeoutError', '02:ping:TimeoutError'] @@ -105,6 +108,9 @@ port-01 module_config modules "nmap": { "enabled": true }, + "ntp": { + "enabled": true + }, "pass": { "enabled": true }, @@ -154,6 +160,9 @@ port-02 module_config modules "nmap": { "enabled": true }, + "ntp": { + "enabled": true + }, "pass": { "enabled": false }, diff --git a/testing/test_aux.sh b/testing/test_aux.sh index ebf75ee79c..f78d30f506 100755 --- a/testing/test_aux.sh +++ b/testing/test_aux.sh @@ -62,9 +62,9 @@ site_path: inst/test_site schema_path: schemas/udmi interfaces: faux-1: - opts: brute broadcast_client ntp_pass + opts: brute broadcast_client ntpv4 faux-2: - opts: nobrute expiredtls bacnetfail pubber passwordfail ntp_fail opendns + opts: nobrute expiredtls bacnetfail pubber passwordfail ntpv3 opendns faux-3: opts: tls macoui passwordpass bacnet pubber broadcast_client long_dhcp_response_sec: 0 @@ -114,6 +114,7 @@ capture_test_results tls capture_test_results password capture_test_results discover capture_test_results network +capture_test_results ntp # Capture peripheral logs more inst/run-port-*/scans/ip_triggers.txt | cat From fde428980a4e15cf35bbac5a802ffec5eac012cd Mon Sep 17 00:00:00 2001 From: Trevor Date: Wed, 8 Jul 2020 17:15:13 -0700 Subject: [PATCH 17/38] UDMI registrar error handling updates (#508) --- .checkstyle.xml | 2 +- bin/registrar | 2 +- bin/validate | 12 ++- .../main/java/daq/pubber/Configuration.java | 1 + 
pubber/src/main/java/daq/pubber/Pubber.java | 15 ++-- subset/cloud/test_udmi | 2 +- .../.idea/codeStyles/codeStyleConfig.xml | 2 +- ...ckson_core_jackson_annotations_2_11_0.xml} | 6 +- ...rxml_jackson_core_jackson_core_2_11_0.xml} | 6 +- ..._jackson_core_jackson_databind_2_11_0.xml} | 6 +- ...format_jackson_dataformat_yaml_2_11_0.xml} | 6 +- ...ml => Gradle__org_yaml_snakeyaml_1_26.xml} | 6 +- .../.idea/modules/daq-validator.validator.iml | 10 +-- validator/bin/registrar | 4 + validator/bin/test_schema | 2 +- validator/bin/validate | 3 + .../daq/mqtt/registrar/LocalDevice.java | 47 +++++------ .../google/daq/mqtt/registrar/Registrar.java | 77 +++++++++++++------ .../google/daq/mqtt/util/CloudIotManager.java | 10 +-- .../daq/mqtt/util/FirestoreDataSink.java | 24 +----- .../google/daq/mqtt/validator/Validator.java | 10 +-- 21 files changed, 138 insertions(+), 115 deletions(-) rename validator/.idea/libraries/{Gradle__com_fasterxml_jackson_core_jackson_annotations_2_10_3.xml => Gradle__com_fasterxml_jackson_core_jackson_annotations_2_11_0.xml} (54%) rename validator/.idea/libraries/{Gradle__com_fasterxml_jackson_core_jackson_core_2_10_3.xml => Gradle__com_fasterxml_jackson_core_jackson_core_2_11_0.xml} (55%) rename validator/.idea/libraries/{Gradle__com_fasterxml_jackson_core_jackson_databind_2_10_3.xml => Gradle__com_fasterxml_jackson_core_jackson_databind_2_11_0.xml} (55%) rename validator/.idea/libraries/{Gradle__com_fasterxml_jackson_dataformat_jackson_dataformat_yaml_2_10_3.xml => Gradle__com_fasterxml_jackson_dataformat_jackson_dataformat_yaml_2_11_0.xml} (68%) rename validator/.idea/libraries/{Gradle__org_yaml_snakeyaml_1_24.xml => Gradle__org_yaml_snakeyaml_1_26.xml} (50%) diff --git a/.checkstyle.xml b/.checkstyle.xml index a214cc83f6..469a132853 100644 --- a/.checkstyle.xml +++ b/.checkstyle.xml @@ -42,7 +42,7 @@ - + diff --git a/bin/registrar b/bin/registrar index e2278951e4..3f941ba188 100755 --- a/bin/registrar +++ b/bin/registrar @@ -27,4 +27,4 @@ 
validator/bin/build > /dev/null echo Running tools version `git describe` -validator/bin/registrar $project_id $site_path $schema_path $* +validator/bin/registrar $project_id $site_path $schema_path $* 2>&1 diff --git a/bin/validate b/bin/validate index bd13b42612..9789659c11 100755 --- a/bin/validate +++ b/bin/validate @@ -20,16 +20,14 @@ if [ -z "$schema_path" ]; then false fi -validator/bin/build +echo Building validator... +validator/bin/build > /dev/null -unset GOOGLE_CLOUD_PROJECT -export GOOGLE_APPLICATION_CREDENTIALS=$PWD/$gcp_cred -echo Using credentials from $GOOGLE_APPLICATION_CREDENTIALS echo Configured topic is $gcp_topic echo Configured schema is $schema_path -if [ -n "$site_path" ]; then - echo Configured site path is $site_path -fi +echo Configured site path is $site_path echo +echo Running tools version `git describe` + validator/bin/validate $schema_path pubsub:$gcp_topic dev $site_path diff --git a/pubber/src/main/java/daq/pubber/Configuration.java b/pubber/src/main/java/daq/pubber/Configuration.java index 7c362781ef..e72d6919d2 100644 --- a/pubber/src/main/java/daq/pubber/Configuration.java +++ b/pubber/src/main/java/daq/pubber/Configuration.java @@ -10,6 +10,7 @@ public class Configuration { public String registryId; public String gatewayId; public String deviceId; + public String sitePath; public String keyFile = "local/rsa_private.pkcs8"; public byte[] keyBytes; public String algorithm = "RS256"; diff --git a/pubber/src/main/java/daq/pubber/Pubber.java b/pubber/src/main/java/daq/pubber/Pubber.java index 5f28a08efa..1265b65e53 100644 --- a/pubber/src/main/java/daq/pubber/Pubber.java +++ b/pubber/src/main/java/daq/pubber/Pubber.java @@ -8,9 +8,6 @@ import daq.udmi.Message.Pointset; import daq.udmi.Message.PointsetState; import daq.udmi.Message.State; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.File; import java.nio.file.Files; import java.nio.file.Path; @@ -24,6 +21,8 @@ import 
java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class Pubber { @@ -40,9 +39,10 @@ public class Pubber { private static final int MIN_REPORT_MS = 200; private static final int DEFAULT_REPORT_MS = 5000; private static final int CONFIG_WAIT_TIME_MS = 10000; - private static final int STATE_THROTTLE_MS = 1500; + private static final int STATE_THROTTLE_MS = 2000; private static final String CONFIG_ERROR_STATUS_KEY = "config_error"; private static final int LOGGING_MOD_COUNT = 10; + public static final String KEY_SITE_PATH_FORMAT = "%s/devices/%s/rsa_private.pkcs8"; private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); @@ -163,6 +163,11 @@ private void addPoint(AbstractPoint point) { } private void initialize() { + Preconditions.checkNotNull(configuration.deviceId, "configuration deviceId not defined"); + if (configuration.sitePath != null) { + configuration.keyFile = String.format(KEY_SITE_PATH_FORMAT, configuration.sitePath, + configuration.deviceId); + } Preconditions.checkState(mqttPublisher == null, "mqttPublisher already defined"); Preconditions.checkNotNull(configuration.keyFile, "configuration keyFile not defined"); System.err.println("Loading device key file from " + configuration.keyFile); @@ -262,8 +267,8 @@ private void publishLogMessage(String deviceId, String logMessage) { private void publishStateMessage(String deviceId) { lastStateTimeMs = sleepUntil(lastStateTimeMs + STATE_THROTTLE_MS); - info("Sending state message for device " + deviceId); deviceState.timestamp = new Date(); + info("Sending state message for device " + deviceId + " at " + deviceState.timestamp); mqttPublisher.publish(deviceId, STATE_TOPIC, deviceState); } diff --git a/subset/cloud/test_udmi b/subset/cloud/test_udmi index fde175a26d..aa484f5434 100755 --- a/subset/cloud/test_udmi +++ 
b/subset/cloud/test_udmi @@ -60,7 +60,7 @@ echo Configured schema is $schema_path echo Target device is $device_id echo -timeout 60 validator/bin/validate $PWD/$schema_path pubsub:$gcp_topic $service_id-$HOSTNAME || true +timeout 60 validator/bin/validate $PWD/$schema_path pubsub:$gcp_topic $service_id-$HOSTNAME -- || true function message_report { message_type=$1 diff --git a/validator/.idea/codeStyles/codeStyleConfig.xml b/validator/.idea/codeStyles/codeStyleConfig.xml index a55e7a179b..b9d18bf599 100644 --- a/validator/.idea/codeStyles/codeStyleConfig.xml +++ b/validator/.idea/codeStyles/codeStyleConfig.xml @@ -1,5 +1,5 @@ - \ No newline at end of file diff --git a/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_annotations_2_10_3.xml b/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_annotations_2_11_0.xml similarity index 54% rename from validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_annotations_2_10_3.xml rename to validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_annotations_2_11_0.xml index 940abc9cd6..fef9a9403a 100644 --- a/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_annotations_2_10_3.xml +++ b/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_annotations_2_11_0.xml @@ -1,11 +1,11 @@ - + - + - + \ No newline at end of file diff --git a/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_core_2_10_3.xml b/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_core_2_11_0.xml similarity index 55% rename from validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_core_2_10_3.xml rename to validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_core_2_11_0.xml index c39a1aad89..93709bc78c 100644 --- a/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_core_2_10_3.xml +++ b/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_core_2_11_0.xml @@ 
-1,11 +1,11 @@ - + - + - + \ No newline at end of file diff --git a/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_databind_2_10_3.xml b/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_databind_2_11_0.xml similarity index 55% rename from validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_databind_2_10_3.xml rename to validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_databind_2_11_0.xml index 401e4470cc..326959d4e0 100644 --- a/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_databind_2_10_3.xml +++ b/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_databind_2_11_0.xml @@ -1,11 +1,11 @@ - + - + - + \ No newline at end of file diff --git a/validator/.idea/libraries/Gradle__com_fasterxml_jackson_dataformat_jackson_dataformat_yaml_2_10_3.xml b/validator/.idea/libraries/Gradle__com_fasterxml_jackson_dataformat_jackson_dataformat_yaml_2_11_0.xml similarity index 68% rename from validator/.idea/libraries/Gradle__com_fasterxml_jackson_dataformat_jackson_dataformat_yaml_2_10_3.xml rename to validator/.idea/libraries/Gradle__com_fasterxml_jackson_dataformat_jackson_dataformat_yaml_2_11_0.xml index eeaf4be6fe..8b4a7f585e 100644 --- a/validator/.idea/libraries/Gradle__com_fasterxml_jackson_dataformat_jackson_dataformat_yaml_2_10_3.xml +++ b/validator/.idea/libraries/Gradle__com_fasterxml_jackson_dataformat_jackson_dataformat_yaml_2_11_0.xml @@ -1,11 +1,11 @@ - + - + - + \ No newline at end of file diff --git a/validator/.idea/libraries/Gradle__org_yaml_snakeyaml_1_24.xml b/validator/.idea/libraries/Gradle__org_yaml_snakeyaml_1_26.xml similarity index 50% rename from validator/.idea/libraries/Gradle__org_yaml_snakeyaml_1_24.xml rename to validator/.idea/libraries/Gradle__org_yaml_snakeyaml_1_26.xml index 6d98003d93..734cd9dad1 100644 --- a/validator/.idea/libraries/Gradle__org_yaml_snakeyaml_1_24.xml +++ 
b/validator/.idea/libraries/Gradle__org_yaml_snakeyaml_1_26.xml @@ -1,11 +1,11 @@ - + - + - + \ No newline at end of file diff --git a/validator/.idea/modules/daq-validator.validator.iml b/validator/.idea/modules/daq-validator.validator.iml index c07f3da9ef..f12ef8c574 100644 --- a/validator/.idea/modules/daq-validator.validator.iml +++ b/validator/.idea/modules/daq-validator.validator.iml @@ -12,8 +12,8 @@ - - + + @@ -46,9 +46,9 @@ - - - + + + diff --git a/validator/bin/registrar b/validator/bin/registrar index c302518d21..49dc18d1e3 100755 --- a/validator/bin/registrar +++ b/validator/bin/registrar @@ -16,6 +16,9 @@ devices_dir=$2 schema_dir=$3 device_filter=$4 +echo Using gcloud auth: +gcloud config get-value account || true + echo Using cloud project $project_id echo Using site config dir $devices_dir echo Using schema root dir $schema_dir @@ -24,6 +27,7 @@ echo Using device filter $device_filter JAVA=/usr/lib/jvm/java-11-openjdk-amd64/bin/java error=0 +echo java args $project_id $devices_dir $schema_dir $device_filter $JAVA -cp $jarfile $mainclass $project_id $devices_dir $schema_dir $device_filter || error=$? echo Registrar complete, exit $error diff --git a/validator/bin/test_schema b/validator/bin/test_schema index c73505aa2e..f3adbf1657 100755 --- a/validator/bin/test_schema +++ b/validator/bin/test_schema @@ -59,7 +59,7 @@ for schema in $schemas; do error=0 reltest=${testpath#$rootdir/} - (cd $rootdir; java -jar $jarfile $schemaname $reltest $ignoreset) 2> $output || error=$? + (cd $rootdir; java -jar $jarfile $schemaname $reltest $ignoreset --) 2> $output || error=$? if [ $force == y ]; then diff $expected $output || echo Updating $expected && cp $output $expected else diff --git a/validator/bin/validate b/validator/bin/validate index 910021f982..e79c158756 100755 --- a/validator/bin/validate +++ b/validator/bin/validate @@ -20,6 +20,9 @@ if [ ! 
-f $jarfile ]; then validator/bin/build fi +echo Using gcloud auth: +gcloud config get-value account || true + echo Executing validator $schema $target... echo Validating against schema $schemafile into validations/ diff --git a/validator/src/main/java/com/google/daq/mqtt/registrar/LocalDevice.java b/validator/src/main/java/com/google/daq/mqtt/registrar/LocalDevice.java index f36e338b85..e32dc8b2b0 100644 --- a/validator/src/main/java/com/google/daq/mqtt/registrar/LocalDevice.java +++ b/validator/src/main/java/com/google/daq/mqtt/registrar/LocalDevice.java @@ -51,7 +51,6 @@ class LocalDevice { private static final String RSA_CERT_PEM = "rsa_cert.pem"; private static final String RSA_PRIVATE_PEM = "rsa_private.pem"; private static final String RSA_PRIVATE_PKCS8 = "rsa_private.pkcs8"; - private static final String PHYSICAL_TAG_ERROR = "Physical tag %s %s does not match expected %s"; private static final Set DEVICE_FILES = ImmutableSet.of(METADATA_JSON); private static final Set KEY_FILES = ImmutableSet.of(RSA_PUBLIC_PEM, RSA_PRIVATE_PEM, RSA_PRIVATE_PKCS8); @@ -68,18 +67,17 @@ class LocalDevice { private final Map schemas; private final File deviceDir; private final UdmiSchema.Metadata metadata; - private final File devicesDir; private final ExceptionMap exceptionMap; private String deviceNumId; private CloudDeviceSettings settings; + private DeviceCredential deviceCredential; LocalDevice(File devicesDir, String deviceId, Map schemas) { try { this.deviceId = deviceId; this.schemas = schemas; - this.devicesDir = devicesDir; exceptionMap = new ExceptionMap("Exceptions for " + deviceId); deviceDir = new File(devicesDir, deviceId); metadata = readMetadata(); @@ -162,16 +160,21 @@ private String getAuthFileType() { return RSA_CERT_TYPE.equals(getAuthType()) ? 
RSA_CERT_FILE : RSA_KEY_FILE; } - private DeviceCredential loadCredential() { + public DeviceCredential loadCredential() { + deviceCredential = readCredential(); + return deviceCredential; + } + + public DeviceCredential readCredential() { try { if (hasGateway() && getAuthType() != null) { - throw new RuntimeException("Proxied devices should not have auth_type defined"); + throw new RuntimeException("Proxied devices should not have cloud.auth_type defined"); } if (!isDirectConnect()) { return null; } if (getAuthType() == null) { - throw new RuntimeException("Credential auth_type definition missing"); + throw new RuntimeException("Credential cloud.auth_type definition missing"); } File deviceKeyFile = new File(deviceDir, publicKeyFile()); if (!deviceKeyFile.exists()) { @@ -223,10 +226,6 @@ boolean isDirectConnect() { return isGateway() || !hasGateway(); } - String getGatewayId() { - return hasGateway() ? metadata.gateway.gateway_id : null; - } - CloudDeviceSettings getSettings() { try { if (settings != null) { @@ -236,7 +235,7 @@ CloudDeviceSettings getSettings() { if (metadata == null) { return settings; } - settings.credential = loadCredential(); + settings.credential = deviceCredential; settings.metadata = metadataString(); settings.config = deviceConfigString(); settings.proxyDevices = getProxyDevicesList(); @@ -297,6 +296,7 @@ private String metadataString() { } public void validateEnvelope(String registryId, String siteName) { + checkConsistency(siteName); try { UdmiSchema.Envelope envelope = new UdmiSchema.Envelope(); envelope.deviceId = deviceId; @@ -309,7 +309,6 @@ public void validateEnvelope(String registryId, String siteName) { } catch (Exception e) { throw new IllegalStateException("Validating envelope " + deviceId, e); } - checkConsistency(siteName); } private String fakeProjectId() { @@ -317,15 +316,17 @@ private String fakeProjectId() { } private void checkConsistency(String expectedSite) { - String siteName = metadata.system.location.site; - String 
assetSite = metadata.system.physical_tag.asset.site; String assetName = metadata.system.physical_tag.asset.name; - Preconditions.checkState(expectedSite.equals(siteName), - String.format(PHYSICAL_TAG_ERROR, "location", siteName, expectedSite)); - Preconditions.checkState(expectedSite.equals(assetSite), - String.format(PHYSICAL_TAG_ERROR, "site", assetSite, expectedSite)); Preconditions.checkState(deviceId.equals(assetName), - String.format(PHYSICAL_TAG_ERROR, "name", assetName, deviceId)); + String.format("system.physical_tag.asset.name %s does not match expected %s", assetName, deviceId)); + + String assetSite = metadata.system.physical_tag.asset.site; + Preconditions.checkState(expectedSite.equals(assetSite), + String.format("system.physical_tag.asset.site %s does not match expected %s", assetSite, expectedSite)); + + String siteName = metadata.system.location.site; + Preconditions.checkState(expectedSite.equals(siteName), + String.format("system.location.site %s does not match expected %s", siteName, expectedSite)); } private String makeNumId(UdmiSchema.Envelope envelope) { @@ -335,11 +336,12 @@ private String makeNumId(UdmiSchema.Envelope envelope) { public void writeErrors() { File errorsFile = new File(deviceDir, DEVICE_ERRORS_JSON); - System.err.println("Updating " + errorsFile); if (exceptionMap.isEmpty()) { + System.err.println("Removing " + errorsFile); errorsFile.delete(); return; } + System.err.println("Updating " + errorsFile); try (PrintStream printStream = new PrintStream(new FileOutputStream(errorsFile))) { ExceptionMap.ErrorTree errorTree = ExceptionMap.format(exceptionMap, ERROR_FORMAT_INDENT); errorTree.write(printStream); @@ -377,8 +379,9 @@ void writeNormalized() { public void writeConfigFile() { File configFile = new File(deviceDir, GENERATED_CONFIG_JSON); try (OutputStream outputStream = new FileOutputStream(configFile)) { - outputStream.write(settings.config.getBytes()); + outputStream.write(getSettings().config.getBytes()); } catch 
(Exception e) { + e.printStackTrace(); throw new RuntimeException("While writing "+ configFile.getAbsolutePath(), e); } } @@ -399,7 +402,7 @@ public ExceptionMap getErrors() { return exceptionMap; } - public boolean hasValidMetadata() { + public boolean isValid() { return metadata != null; } diff --git a/validator/src/main/java/com/google/daq/mqtt/registrar/Registrar.java b/validator/src/main/java/com/google/daq/mqtt/registrar/Registrar.java index 6b991bca48..e665d9ed16 100644 --- a/validator/src/main/java/com/google/daq/mqtt/registrar/Registrar.java +++ b/validator/src/main/java/com/google/daq/mqtt/registrar/Registrar.java @@ -1,5 +1,7 @@ package com.google.daq.mqtt.registrar; +import static java.util.stream.Collectors.toSet; + import com.fasterxml.jackson.annotation.JsonInclude.Include; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.SerializationFeature; @@ -7,24 +9,30 @@ import com.google.api.services.cloudiot.v1.model.Device; import com.google.api.services.cloudiot.v1.model.DeviceCredential; import com.google.common.base.Preconditions; -import com.google.daq.mqtt.util.*; +import com.google.common.collect.ImmutableList; +import com.google.daq.mqtt.util.CloudDeviceSettings; +import com.google.daq.mqtt.util.CloudIotManager; +import com.google.daq.mqtt.util.ConfigUtil; +import com.google.daq.mqtt.util.ExceptionMap; import com.google.daq.mqtt.util.ExceptionMap.ErrorTree; -import org.everit.json.schema.Schema; -import org.everit.json.schema.loader.SchemaClient; -import org.everit.json.schema.loader.SchemaLoader; -import org.json.JSONObject; -import org.json.JSONTokener; - +import com.google.daq.mqtt.util.PubSubPusher; import java.io.File; import java.io.FileInputStream; import java.io.InputStream; import java.math.BigInteger; -import java.util.*; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; import java.util.regex.Matcher; import 
java.util.regex.Pattern; import java.util.stream.Collectors; - -import static java.util.stream.Collectors.toSet; +import org.everit.json.schema.Schema; +import org.everit.json.schema.loader.SchemaClient; +import org.everit.json.schema.loader.SchemaLoader; +import org.json.JSONObject; +import org.json.JSONTokener; public class Registrar { @@ -43,6 +51,7 @@ public class Registrar { .setDateFormat(new ISO8601DateFormat()) .setSerializationInclusion(Include.NON_NULL); public static final String ALL_MATCH = ""; + private static final String LOCAL_ONLY_PROJECT_ID = "--"; private CloudIotManager cloudIotManager; private File siteConfig; @@ -90,7 +99,7 @@ private void writeErrors() throws Exception { .put(device.getDeviceId(), "True"); } }); - if (!blockErrors.isEmpty()) { + if (blockErrors != null && !blockErrors.isEmpty()) { errorSummary.put("Block", blockErrors.stream().collect(Collectors.toMap( Map.Entry::getKey, entry -> entry.getValue().toString()))); } @@ -121,27 +130,31 @@ private void processDevices(String deviceRegex) { Set extraDevices = cloudDevices.stream().map(Device::getId).collect(toSet()); for (String localName : localDevices.keySet()) { LocalDevice localDevice = localDevices.get(localName); - if (!localDevice.hasValidMetadata()) { + if (!localDevice.isValid()) { System.err.println("Skipping (invalid) " + localName); continue; } extraDevices.remove(localName); try { - updateCloudIoT(localDevice); localDevice.writeConfigFile(); - Device device = Preconditions.checkNotNull(fetchDevice(localName), - "missing device " + localName); - BigInteger numId = Preconditions.checkNotNull(device.getNumId(), - "missing deviceNumId for " + localName); - localDevice.setDeviceNumId(numId.toString()); - sendMetadataMessage(localDevice); + if (!localOnly()) { + updateCloudIoT(localDevice); + Device device = Preconditions.checkNotNull(fetchDevice(localName), + "missing device " + localName); + BigInteger numId = Preconditions.checkNotNull(device.getNumId(), + "missing 
deviceNumId for " + localName); + localDevice.setDeviceNumId(numId.toString()); + sendMetadataMessage(localDevice); + } } catch (Exception e) { System.err.println("Deferring exception: " + e.toString()); localDevice.getErrors().put("Registering", e); } } - bindGatewayDevices(localDevices); - blockErrors = blockExtraDevices(extraDevices); + if (!localOnly()) { + bindGatewayDevices(localDevices); + blockErrors = blockExtraDevices(extraDevices); + } System.err.println(String.format("Processed %d devices", localDevices.size())); } catch (Exception e) { throw new RuntimeException("While processing devices", e); @@ -209,8 +222,17 @@ private void shutdown() { } private List fetchDeviceList(Pattern devicePattern) { - System.err.println("Fetching remote registry " + cloudIotManager.getRegistryId()); - return cloudIotManager.fetchDeviceList(devicePattern); + if (localOnly()) { + System.err.println("Skipping remote registry fetch"); + return ImmutableList.of(); + } else { + System.err.println("Fetching remote registry " + cloudIotManager.getRegistryPath()); + return cloudIotManager.fetchDeviceList(devicePattern); + } + } + + private boolean localOnly() { + return LOCAL_ONLY_PROJECT_ID.equals(projectId); } private Map loadLocalDevices(Pattern devicePattern) { @@ -267,8 +289,13 @@ private Map loadDevices(File devicesDir, String[] devices, Matcher deviceMatch = devicePattern.matcher(deviceName); if (deviceMatch.find() && LocalDevice.deviceExists(devicesDir, deviceName)) { System.err.println("Loading local device " + deviceName); - LocalDevice localDevice = new LocalDevice(devicesDir, deviceName, schemas); - localDevices.put(deviceName, localDevice); + LocalDevice localDevice = localDevices.computeIfAbsent(deviceName, + keyName -> new LocalDevice(devicesDir, deviceName, schemas)); + try { + localDevice.loadCredential(); + } catch (Exception e) { + localDevice.getErrors().put("Credential", e); + } try { localDevice.validateEnvelope(cloudIotManager.getRegistryId(), 
cloudIotManager.getSiteName()); } catch (Exception e) { diff --git a/validator/src/main/java/com/google/daq/mqtt/util/CloudIotManager.java b/validator/src/main/java/com/google/daq/mqtt/util/CloudIotManager.java index f69c3bb0ae..1fec58ed0a 100644 --- a/validator/src/main/java/com/google/daq/mqtt/util/CloudIotManager.java +++ b/validator/src/main/java/com/google/daq/mqtt/util/CloudIotManager.java @@ -68,12 +68,12 @@ private static CloudIotConfig validate(CloudIotConfig cloudIotConfig) { return cloudIotConfig; } - private String getRegistryPath(String registryId) { + public String getRegistryPath() { return projectPath + "/registries/" + registryId; } private String getDevicePath(String registryId, String deviceId) { - return getRegistryPath(registryId) + "/devices/" + deviceId; + return getRegistryPath() + "/devices/" + deviceId; } private void initializeCloudIoT() { @@ -168,7 +168,7 @@ private GatewayConfig getGatewayConfig(CloudDeviceSettings settings) { private void createDevice(String deviceId, CloudDeviceSettings settings) throws IOException { try { - cloudIotRegistries.devices().create(getRegistryPath(registryId), + cloudIotRegistries.devices().create(getRegistryPath(), makeDevice(deviceId, settings, null)).execute(); } catch (GoogleJsonResponseException e) { throw new RuntimeException("Remote error creating device " + deviceId, e); @@ -205,7 +205,7 @@ public List fetchDeviceList(Pattern devicePattern) { try { List devices = cloudIotRegistries .devices() - .list(getRegistryPath(registryId)) + .list(getRegistryPath()) .setPageSize(LIST_PAGE_SIZE) .execute() .getDevices(); @@ -254,7 +254,7 @@ public Object getCloudRegion() { } public void bindDevice(String proxyDeviceId, String gatewayDeviceId) throws IOException { - cloudIotRegistries.bindDeviceToGateway(getRegistryPath(registryId), + cloudIotRegistries.bindDeviceToGateway(getRegistryPath(), getBindRequest(proxyDeviceId, gatewayDeviceId)).execute(); } diff --git 
a/validator/src/main/java/com/google/daq/mqtt/util/FirestoreDataSink.java b/validator/src/main/java/com/google/daq/mqtt/util/FirestoreDataSink.java index 9a09a0cc9d..ac0d527f2a 100644 --- a/validator/src/main/java/com/google/daq/mqtt/util/FirestoreDataSink.java +++ b/validator/src/main/java/com/google/daq/mqtt/util/FirestoreDataSink.java @@ -1,6 +1,5 @@ package com.google.daq.mqtt.util; -import com.google.auth.Credentials; import com.google.auth.oauth2.GoogleCredentials; import com.google.cloud.ServiceOptions; import com.google.cloud.firestore.DocumentReference; @@ -8,9 +7,6 @@ import com.google.cloud.firestore.FirestoreOptions; import com.google.common.base.Preconditions; import com.google.daq.mqtt.util.ExceptionMap.ErrorTree; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; import java.time.Instant; import java.time.ZoneOffset; import java.time.format.DateTimeFormatter; @@ -19,8 +15,6 @@ public class FirestoreDataSink { - private static final String - CREDENTIAL_ERROR_FORMAT = "Credential file %s defined by %s not found."; private static final String VIEW_URL_FORMAT = "https://console.cloud.google.com/firestore/data/registries/?project=%s"; @@ -34,10 +28,10 @@ public class FirestoreDataSink { public FirestoreDataSink() { try { - Credentials projectCredentials = getProjectCredentials(); + GoogleCredentials credential = GoogleCredentials.getApplicationDefault(); FirestoreOptions firestoreOptions = FirestoreOptions.getDefaultInstance().toBuilder() - .setCredentials(projectCredentials) + .setCredentials(credential) .setProjectId(projectId) .setTimestampsInSnapshotsEnabled(true) .build(); @@ -48,20 +42,8 @@ public FirestoreDataSink() { } } - private Credentials getProjectCredentials() throws IOException { - File credentialFile = new File(System.getenv(ServiceOptions.CREDENTIAL_ENV_NAME)); - if (!credentialFile.exists()) { - throw new RuntimeException(String.format(CREDENTIAL_ERROR_FORMAT, - credentialFile.getAbsolutePath(), 
ServiceOptions.CREDENTIAL_ENV_NAME)); - } - try (FileInputStream serviceAccount = new FileInputStream(credentialFile)) { - return GoogleCredentials.fromStream(serviceAccount); - } - } - public void validationResult(String deviceId, String schemaId, Map attributes, - Object message, - ErrorTree errorTree) { + Object message, ErrorTree errorTree) { if (oldError.get() != null) { throw oldError.getAndSet(null); } diff --git a/validator/src/main/java/com/google/daq/mqtt/validator/Validator.java b/validator/src/main/java/com/google/daq/mqtt/validator/Validator.java index 5fc94ed99c..ca133f2769 100644 --- a/validator/src/main/java/com/google/daq/mqtt/validator/Validator.java +++ b/validator/src/main/java/com/google/daq/mqtt/validator/Validator.java @@ -56,6 +56,7 @@ public class Validator { private static final String DEVICE_REGISTRY_ID_KEY = "deviceRegistryId"; private static final String UNKNOWN_SCHEMA_DEFAULT = "unknown"; private static final String POINTSET_TYPE = "pointset"; + private static final String NO_SITE = "--"; private FirestoreDataSink dataSink; private File schemaRoot; private String schemaSpec; @@ -69,15 +70,14 @@ public class Validator { public static void main(String[] args) { Validator validator = new Validator(); try { - System.out.println(ServiceOptions.CREDENTIAL_ENV_NAME + "=" + - System.getenv(ServiceOptions.CREDENTIAL_ENV_NAME)); - if (args.length < 3 || args.length > 4) { - throw new IllegalArgumentException("Args: schema target inst_name [site]"); + if (args.length != 4) { + throw new IllegalArgumentException("Args: [schema] [target] [inst_name] [site]"); } validator.setSchemaSpec(args[0]); String targetSpec = args[1]; String instName = args[2]; - if (args.length >= 4) { + String siteDir = args[3]; + if (!NO_SITE.equals(siteDir)) { validator.setSiteDir(args[3]); } if (targetSpec.startsWith(PUBSUB_PREFIX)) { From 7478825759236645d3329939b3350dde4c678f1a Mon Sep 17 00:00:00 2001 From: Trevor Date: Thu, 9 Jul 2020 16:55:58 -0700 Subject: [PATCH 
18/38] Use trunk rather than stack between switches (#526) --- daq/network.py | 2 +- daq/topology.py | 27 +++++++++++++++------------ 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/daq/network.py b/daq/network.py index b622205926..255a6bd259 100644 --- a/daq/network.py +++ b/daq/network.py @@ -136,7 +136,7 @@ def _attach_sec_device_links(self): def is_system_port(self, dpid, port): """Check if the dpid/port combo is the system trunk port""" - return dpid == self.topology.PRI_DPID and port == self.topology.PRI_STACK_PORT + return dpid == self.topology.PRI_DPID and port == self.topology.PRI_TRUNK_PORT def is_device_port(self, dpid, port): """Check if the dpid/port combo is for a valid device""" diff --git a/daq/topology.py b/daq/topology.py index 2438b49012..8d0cb62390 100644 --- a/daq/topology.py +++ b/daq/topology.py @@ -31,13 +31,14 @@ class FaucetTopology: INCOMING_ACL_FORMAT = "dp_%s_incoming_acl" PORTSET_ACL_FORMAT = "dp_%s_portset_%d_acl" LOCAL_ACL_FORMAT = "dp_%s_local_acl" - _DEFAULT_STACK_PORT_NAME = "stack_sec" + _DEFAULT_SEC_TRUNK_NAME = "trunk_sec" _MIRROR_IFACE_FORMAT = "mirror-%d" _MIRROR_PORT_BASE = 1000 _SWITCH_LOCAL_PORT = _MIRROR_PORT_BASE _VLAN_BASE = 1000 PRI_DPID = 1 - PRI_STACK_PORT = 1 + PRI_TRUNK_PORT = 1 + PRI_TRUNK_NAME = 'trunk_pri' _NO_VLAN = "0x0000/0x1000" def __init__(self, config): @@ -92,7 +93,7 @@ def get_sec_dpid(self): return self.sec_dpid def get_sec_port(self): - """Return the secondary stacking port""" + """Return the secondary trunk port""" return self.sec_port def get_device_intfs(self): @@ -173,20 +174,23 @@ def _update_port_vlan(self, port_no, port_set): def _port_set_vlan(self, port_set=None): return self._VLAN_BASE + (port_set if port_set else 0) - def _make_pri_stack_interface(self): + def _make_pri_trunk_interface(self): interface = {} interface['acl_in'] = self.INCOMING_ACL_FORMAT % self.pri_name - interface['stack'] = {'dp': self.sec_name, 'port': self.sec_port} - interface['name'] = 'stack_pri' + 
interface['tagged_vlans'] = self._vlan_tags() + interface['name'] = self.PRI_TRUNK_NAME return interface - def _make_sec_stack_interface(self): + def _make_sec_trunk_interface(self): interface = {} interface['acl_in'] = self.INCOMING_ACL_FORMAT % self.sec_name - interface['stack'] = {'dp': self.pri_name, 'port': self.PRI_STACK_PORT} - interface['name'] = self.get_ext_intf() or self._DEFAULT_STACK_PORT_NAME + interface['tagged_vlans'] = self._vlan_tags() + interface['name'] = self.get_ext_intf() or self._DEFAULT_SEC_TRUNK_NAME return interface + def _vlan_tags(self): + return list(range(self._VLAN_BASE, self._VLAN_BASE + self.sec_port)) + def _make_default_acl_rules(self): rules = [] if not self._append_acl_template(rules, 'raw'): @@ -201,7 +205,7 @@ def _make_sec_port_interface(self, port_no): def _make_pri_interfaces(self): interfaces = {} - interfaces[self.PRI_STACK_PORT] = self._make_pri_stack_interface() + interfaces[self.PRI_TRUNK_PORT] = self._make_pri_trunk_interface() for port_set in range(1, self.sec_port): for port in self._get_gw_ports(port_set): interfaces[port] = self._make_gw_interface(port_set) @@ -212,7 +216,7 @@ def _make_pri_interfaces(self): def _make_sec_interfaces(self): interfaces = {} - interfaces[self.sec_port] = self._make_sec_stack_interface() + interfaces[self.sec_port] = self._make_sec_trunk_interface() for port in range(1, self.sec_port): interfaces[port] = self._make_sec_port_interface(port) return interfaces @@ -228,7 +232,6 @@ def _make_pri_topology(self): pri_dp = {} pri_dp['dp_id'] = self.PRI_DPID pri_dp['name'] = self.pri_name - pri_dp['stack'] = {'priority':1} pri_dp['interfaces'] = self._make_pri_interfaces() return pri_dp From a0bc2b725c948cc37cd85d4ead699cc95f96cdbd Mon Sep 17 00:00:00 2001 From: henry54809 Date: Fri, 10 Jul 2020 09:10:13 -0700 Subject: [PATCH 19/38] using usi in daq (#520) --- bin/build_proto | 2 + bin/setup_dev | 5 +- cmd/exrun | 9 +- cmd/usi | 18 + config/modules/host.conf | 1 + config/modules/topo.conf | 1 
+ config/system/default.yaml | 4 + daq/gateway.py | 19 +- daq/host.py | 38 +- daq/runner.py | 2 +- firebase/public/protos.hash | 2 +- firebase/public/protos.html | 35 ++ libs/proto/system_config_pb2.py | 178 ++++--- libs/proto/usi_pb2.py | 449 ++++++++++++++++++ libs/proto/usi_pb2_grpc.py | 161 +++++++ proto/system_config.proto | 9 + testing/run_unit_tests.sh | 2 +- usi/Dockerfile.usi | 3 +- usi/build.conf | 2 + usi/src/main/java/daq/usi/UsiImpl.java | 37 +- .../main/java/daq/usi/ovs/OpenVSwitch.java | 48 +- usi/start | 2 +- 22 files changed, 909 insertions(+), 118 deletions(-) create mode 100755 cmd/usi create mode 100644 libs/proto/usi_pb2.py create mode 100644 libs/proto/usi_pb2_grpc.py create mode 100644 usi/build.conf diff --git a/bin/build_proto b/bin/build_proto index ad07980e9e..b0886e5d86 100755 --- a/bin/build_proto +++ b/bin/build_proto @@ -56,3 +56,5 @@ mkdir -p libs/proto/ touch libs/proto/__init__.py cp build/daq/proto/*.py libs/proto/ cp build/protos.html $WEB_ROOT/ + +python -m grpc_tools.protoc -I usi/src/main/proto/ --python_out=libs/proto/ --grpc_python_out=libs/proto/ usi/src/main/proto/usi.proto diff --git a/bin/setup_dev b/bin/setup_dev index 62223aa9c3..59eb0f6df3 100755 --- a/bin/setup_dev +++ b/bin/setup_dev @@ -137,8 +137,9 @@ $PIP install --upgrade --index-url=https://pypi.python.org/simple Jinja2 \ google-api-core==1.16.0 \ google-cloud-storage==1.16.1 \ google-cloud-firestore==1.6.0 \ - google-cloud-logging==1.14.0 - + google-cloud-logging==1.14.0 \ + grpcio-tools==1.30.0 + $PIP freeze echo Resetting .cache directory permissions... test -n "$USER" && sudo chown $USER -R $HOME/.cache diff --git a/cmd/exrun b/cmd/exrun index e0538e81a2..88a047994b 100755 --- a/cmd/exrun +++ b/cmd/exrun @@ -44,6 +44,7 @@ FAUCET=$(realpath $ROOT/faucet) FORCH=$(realpath $ROOT/forch) MININET=$(realpath $ROOT/mininet) LIBS=$(realpath $ROOT/libs) +PROTO=$(realpath $ROOT/libs/proto) if [ ! 
-d $FAUCET ]; then echo Faucet directory $FAUCET not found, try running bin/setup_dev. @@ -66,7 +67,7 @@ if [ "$1" == "-h" ]; then skip_autostart=y fi -export PYTHONPATH=$FORCH:$FAUCET:$MININET:$LIBS +export PYTHONPATH=$FORCH:$FAUCET:$MININET:$LIBS:$PROTO mkdir -p $INSTDIR rm -f $INSTDIR/faucet* $cleanup_file @@ -81,7 +82,8 @@ sudo rm -f $cleanup_file function autostart { tmp=`mktemp` echo DAQ autostart $@ - eval $@ | tee $tmp + eval $@ > $tmp # Don't use "eval $@ | tee $tmp" here; breaks cmd/usi. + cat $tmp grep -e '^\s*DAQ autoclean\s' $tmp >> $cleanup_file || true } @@ -105,6 +107,9 @@ else echo No external switch model specified. fi +docker rm -f daq-usi || true +autostart cmd/usi + # Kill any gateways so that they don't prematurely assign an IP address. gwids=$(docker ps --format '{{ .Image }} {{ .Names }}' | fgrep daqf/networking | awk '{print $2}') || true for gwid in $gwids; do diff --git a/cmd/usi b/cmd/usi new file mode 100755 index 0000000000..fe39673d89 --- /dev/null +++ b/cmd/usi @@ -0,0 +1,18 @@ +#!/bin/bash -e +TMP_DIR=/tmp/usi + +function dump_ovs_interfaces { + while true; do + sudo ovs-ofctl show sec > $TMP_DIR/ovs_output.txt || true + sleep 5 + done +} + +echo Starting USI +mkdir -p $TMP_DIR +dump_ovs_interfaces & +PID=$! +docker run -d -v /tmp/usi:/ovs --privileged --network=host --name daq-usi daqf/usi + +echo DAQ autoclean docker kill daq-usi +echo DAQ autoclean kill $PID diff --git a/config/modules/host.conf b/config/modules/host.conf index e6ec5e37ef..8ec24df938 100644 --- a/config/modules/host.conf +++ b/config/modules/host.conf @@ -12,6 +12,7 @@ add mudgee # Additional base modules include subset/pentests/build.conf +include usi/build.conf # Example of how to remove something. 
remove unused diff --git a/config/modules/topo.conf b/config/modules/topo.conf index 4d47ec6cf9..ba02ce2374 100644 --- a/config/modules/topo.conf +++ b/config/modules/topo.conf @@ -3,3 +3,4 @@ build docker/modules # Use ping with runtime configuration for topo testing. add ping +include usi/build.conf diff --git a/config/system/default.yaml b/config/system/default.yaml index 644993b96e..67daa36ba0 100644 --- a/config/system/default.yaml +++ b/config/system/default.yaml @@ -37,3 +37,7 @@ long_dhcp_response_sec: 105 # finish hook: executed at the end of every test finish_hook: bin/dump_network + +# usi url for DAQ to connect to +usi_setup: + url: localhost:5000 diff --git a/daq/gateway.py b/daq/gateway.py index c7c54182a5..9984dc7a6e 100644 --- a/daq/gateway.py +++ b/daq/gateway.py @@ -12,6 +12,7 @@ LOGGER = logger.get_logger('gateway') + class Gateway(): """Gateway collection class for managing testing services""" @@ -37,8 +38,8 @@ def __init__(self, runner, name, port_set, network): self.dummy = None self.tmpdir = None self.targets = {} - self.test_ports = {} - self.ready = {} + self.test_ports = set() + self.ready = set() self.activated = False self.result_linger = False self._scan_monitor = None @@ -125,6 +126,14 @@ def request_new_ip(self, mac): """Requests a new ip for the device""" self.execute_script('new_ip', mac) + def change_dhcp_response_time(self, mac, time): + """Change dhcp response time for device mac""" + self.execute_script('change_dhcp_response_time', mac, time) + + def stop_dhcp_response(self, mac): + """Stops DHCP respopnse for the device""" + self.change_dhcp_response_time(mac, -1) + def allocate_test_port(self): """Get the test port to use for this gateway setup""" test_port = self._switch_port(self.TEST_OFFSET_START) @@ -132,7 +141,7 @@ def allocate_test_port(self): test_port = test_port + 1 limit_port = self._switch_port(self.NUM_SET_PORTS) assert test_port < limit_port, 'no test ports available' - self.test_ports[test_port] = True + 
self.test_ports.add(test_port) return test_port def _startup_scan(self, host): @@ -160,7 +169,7 @@ def _scan_error(self, e): def release_test_port(self, test_port): """Release the given port from the gateway""" assert test_port in self.test_ports, 'test port not allocated' - del self.test_ports[test_port] + self.test_ports.remove(test_port) def _switch_port(self, offset): return self.port_set * self.SET_SPACING + offset @@ -207,7 +216,7 @@ def target_ready(self, target_mac): """Mark a target ready, and return set of ready targets""" if not target_mac in self.ready: LOGGER.info('Ready target %s from gateway group %s', target_mac, self.name) - self.ready[target_mac] = True + self.ready.add(target_mac) return self.ready def get_targets(self): diff --git a/daq/host.py b/daq/host.py index dc13f9b659..8dc3d5f8ac 100644 --- a/daq/host.py +++ b/daq/host.py @@ -5,9 +5,12 @@ import shutil import time from datetime import timedelta, datetime +import grpc from clib import tcpdump_helper from report import ResultType, ReportGenerator +from proto import usi_pb2 as usi +from proto import usi_pb2_grpc as usi_service import configurator import docker_test @@ -46,10 +49,12 @@ class MODE: LONG = 'long' MERR = 'merr' + def pre_states(): """Return pre-test states for basic operation""" return ['startup', 'sanity', 'ipaddr', 'base', 'monitor'] + def post_states(): """Return post-test states for recording finalization""" return ['finish', 'info', 'timer'] @@ -96,6 +101,7 @@ def __init__(self, runner, gateway, target, config): _default_timeout_sec = int(config.get('default_timeout_sec', 0)) self._default_timeout_sec = _default_timeout_sec if _default_timeout_sec else None self._finish_hook_script = config.get('finish_hook') + self._usi_url = config.get('usi_setup', {}).get('url') self._mirror_intf_name = None self._monitor_ref = None self._monitor_start = None @@ -271,6 +277,21 @@ def _state_transition(self, target, expected=None): LOGGER.debug('Target port %d state: %s -> %s', 
self.target_port, self.state, target) self.state = target + def _build_switch_info(self) -> usi.SwitchInfo: + switch_config = self._get_switch_config() + if switch_config["model"]: + switch_model = usi.SwitchModel.Value(switch_config["model"]) + else: + switch_model = usi.SwitchModel.OVS_SWITCH + params = { + "ip_addr": switch_config["ip"], + "device_port": self.target_port, + "model": switch_model, + "username": switch_config["username"], + "password": switch_config["password"] + } + return usi.SwitchInfo(**params) + def is_running(self): """Return True if this host is running active test.""" return self.state != _STATE.ERROR and self.state != _STATE.DONE @@ -285,6 +306,21 @@ def notify_activate(self): self._record_result('startup', state=MODE.HOLD) return self.state == _STATE.WAITING + def connect_port(self, connect): + """Connects/Disconnects port for this host""" + switch_info = self._build_switch_info() + try: + with grpc.insecure_channel(self._usi_url) as channel: + stub = usi_service.USIServiceStub(channel) + if connect: + res = stub.connect(switch_info) + else: + res = stub.disconnect(switch_info) + LOGGER.info('Target port %s %s successful? 
%s', self.target_port, "connect" + if connect else "disconnect", res.success) + except Exception as e: + LOGGER.error(e) + def _prepare(self): LOGGER.info('Target port %d waiting for ip as %s', self.target_port, self.target_mac) self._state_transition(_STATE.WAITING, _STATE.INIT) @@ -306,7 +342,7 @@ def _prepare(self): if dhcp_mode == 'long_response' else 0 LOGGER.info('Target port %d using %s DHCP mode, wait %s', self.target_port, dhcp_mode, wait_time) - self.gateway.execute_script('change_dhcp_response_time', self.target_mac, wait_time) + self.gateway.change_dhcp_response_time(self.target_mac, wait_time) _ = [listener(self) for listener in self._dhcp_listeners] def _aux_module_timeout_handler(self): diff --git a/daq/runner.py b/daq/runner.py index 631aa43526..5895045271 100644 --- a/daq/runner.py +++ b/daq/runner.py @@ -393,7 +393,7 @@ def _target_set_trigger(self, target_port): # Stops all DHCP response initially # Selectively enables dhcp response at ipaddr stage based on dhcp mode - gateway.execute_script('change_dhcp_response_time', target_mac, -1) + gateway.stop_dhcp_response(target_mac) gateway.attach_target(target_port, target) try: diff --git a/firebase/public/protos.hash b/firebase/public/protos.hash index bab39e76c2..3873f3c004 100644 --- a/firebase/public/protos.hash +++ b/firebase/public/protos.hash @@ -1 +1 @@ -b7a56a30dafe26576d6bdef00dfb57dc07a016ac proto/system_config.proto +96148b4135bc7326586f96fc38a18beeca8147c4 proto/system_config.proto diff --git a/firebase/public/protos.html b/firebase/public/protos.html index d24bed4958..cfcb14e33d 100644 --- a/firebase/public/protos.html +++ b/firebase/public/protos.html @@ -198,6 +198,10 @@

Table of Contents

MSwitchSetup +
  • + MUSISetup +
  • + @@ -478,6 +482,13 @@

    DaqConfig

    Set time between port disconnect and host tests shutdown

    + + usi_setup + USISetup + +

    USI url

    + + @@ -679,6 +690,30 @@

    SwitchSetup

    +

    USISetup

    +

    USI paramters

    + + + + + + + + + + + + + + + + +
    FieldTypeLabelDescription
    urlstring

    + + + + + diff --git a/libs/proto/system_config_pb2.py b/libs/proto/system_config_pb2.py index e4746e687f..e110d5eade 100644 --- a/libs/proto/system_config_pb2.py +++ b/libs/proto/system_config_pb2.py @@ -1,7 +1,8 @@ -# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: daq/proto/system_config.proto +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -18,7 +19,7 @@ package='', syntax='proto3', serialized_options=None, - serialized_pb=b'\n\x1d\x64\x61q/proto/system_config.proto\"\xfb\x07\n\tDaqConfig\x12\x18\n\x10site_description\x18\x01 \x01(\t\x12\x18\n\x10monitor_scan_sec\x18\x02 \x01(\x05\x12\x1b\n\x13\x64\x65\x66\x61ult_timeout_sec\x18\x03 \x01(\x05\x12\x12\n\nsettle_sec\x18& \x01(\x05\x12\x11\n\tbase_conf\x18\x04 \x01(\t\x12\x11\n\tsite_path\x18\x05 \x01(\t\x12\x1f\n\x17initial_dhcp_lease_time\x18\x06 \x01(\t\x12\x17\n\x0f\x64hcp_lease_time\x18\x07 \x01(\t\x12\x19\n\x11\x64hcp_response_sec\x18\' \x01(\x05\x12\x1e\n\x16long_dhcp_response_sec\x18\x08 \x01(\x05\x12\"\n\x0cswitch_setup\x18\t \x01(\x0b\x32\x0c.SwitchSetup\x12\x12\n\nhost_tests\x18\x10 \x01(\t\x12\x13\n\x0b\x62uild_tests\x18$ \x01(\x08\x12\x11\n\trun_limit\x18\x11 \x01(\x05\x12\x11\n\tfail_mode\x18\x12 \x01(\x08\x12\x13\n\x0bsingle_shot\x18\" \x01(\x08\x12\x15\n\rresult_linger\x18\x13 \x01(\x08\x12\x0f\n\x07no_test\x18\x14 \x01(\x08\x12\x11\n\tkeep_hold\x18( \x01(\x08\x12\x14\n\x0c\x64\x61q_loglevel\x18\x15 \x01(\t\x12\x18\n\x10mininet_loglevel\x18\x16 \x01(\t\x12\x13\n\x0b\x66inish_hook\x18# \x01(\t\x12\x10\n\x08gcp_cred\x18\x17 \x01(\t\x12\x11\n\tgcp_topic\x18\x18 \x01(\t\x12\x13\n\x0bschema_path\x18\x19 \x01(\t\x12\x11\n\tmud_files\x18\x1a \x01(\t\x12\x14\n\x0c\x64\x65vice_specs\x18\x1b \x01(\t\x12\x13\n\x0btest_config\x18\x1c 
\x01(\t\x12\x19\n\x11port_debounce_sec\x18\x1d \x01(\x05\x12\x11\n\tfail_hook\x18\x1e \x01(\t\x12\x17\n\x0f\x64\x65vice_template\x18\x1f \x01(\t\x12\x14\n\x0csite_reports\x18 \x01(\t\x12\x1f\n\x17run_data_retention_days\x18! \x01(\x02\x12.\n\ninterfaces\x18% \x03(\x0b\x32\x1a.DaqConfig.InterfacesEntry\x12/\n\x0b\x66\x61il_module\x18/ \x03(\x0b\x32\x1a.DaqConfig.FailModuleEntry\x12\x1d\n\x15port_flap_timeout_sec\x18\x30 \x01(\x05\x1a=\n\x0fInterfacesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.Interface:\x02\x38\x01\x1a\x31\n\x0f\x46\x61ilModuleEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xe2\x01\n\x0bSwitchSetup\x12\x11\n\tctrl_intf\x18\t \x01(\t\x12\x0f\n\x07ip_addr\x18\x0b \x01(\t\x12\x13\n\x0buplink_port\x18\r \x01(\x05\x12\x0f\n\x07lo_port\x18\x0e \x01(\x05\x12\x0f\n\x07lo_addr\x18\x0f \x01(\t\x12\x11\n\tmods_addr\x18\x10 \x01(\t\x12\x0f\n\x07of_dpid\x18) \x01(\t\x12\x11\n\tdata_intf\x18* \x01(\t\x12\x0e\n\x06\x65xt_br\x18+ \x01(\t\x12\r\n\x05model\x18, \x01(\t\x12\x10\n\x08username\x18- \x01(\t\x12\x10\n\x08password\x18. 
\x01(\t\"\'\n\tInterface\x12\x0c\n\x04opts\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\x62\x06proto3' + serialized_pb=_b('\n\x1d\x64\x61q/proto/system_config.proto\"\x99\x08\n\tDaqConfig\x12\x18\n\x10site_description\x18\x01 \x01(\t\x12\x18\n\x10monitor_scan_sec\x18\x02 \x01(\x05\x12\x1b\n\x13\x64\x65\x66\x61ult_timeout_sec\x18\x03 \x01(\x05\x12\x12\n\nsettle_sec\x18& \x01(\x05\x12\x11\n\tbase_conf\x18\x04 \x01(\t\x12\x11\n\tsite_path\x18\x05 \x01(\t\x12\x1f\n\x17initial_dhcp_lease_time\x18\x06 \x01(\t\x12\x17\n\x0f\x64hcp_lease_time\x18\x07 \x01(\t\x12\x19\n\x11\x64hcp_response_sec\x18\' \x01(\x05\x12\x1e\n\x16long_dhcp_response_sec\x18\x08 \x01(\x05\x12\"\n\x0cswitch_setup\x18\t \x01(\x0b\x32\x0c.SwitchSetup\x12\x12\n\nhost_tests\x18\x10 \x01(\t\x12\x13\n\x0b\x62uild_tests\x18$ \x01(\x08\x12\x11\n\trun_limit\x18\x11 \x01(\x05\x12\x11\n\tfail_mode\x18\x12 \x01(\x08\x12\x13\n\x0bsingle_shot\x18\" \x01(\x08\x12\x15\n\rresult_linger\x18\x13 \x01(\x08\x12\x0f\n\x07no_test\x18\x14 \x01(\x08\x12\x11\n\tkeep_hold\x18( \x01(\x08\x12\x14\n\x0c\x64\x61q_loglevel\x18\x15 \x01(\t\x12\x18\n\x10mininet_loglevel\x18\x16 \x01(\t\x12\x13\n\x0b\x66inish_hook\x18# \x01(\t\x12\x10\n\x08gcp_cred\x18\x17 \x01(\t\x12\x11\n\tgcp_topic\x18\x18 \x01(\t\x12\x13\n\x0bschema_path\x18\x19 \x01(\t\x12\x11\n\tmud_files\x18\x1a \x01(\t\x12\x14\n\x0c\x64\x65vice_specs\x18\x1b \x01(\t\x12\x13\n\x0btest_config\x18\x1c \x01(\t\x12\x19\n\x11port_debounce_sec\x18\x1d \x01(\x05\x12\x11\n\tfail_hook\x18\x1e \x01(\t\x12\x17\n\x0f\x64\x65vice_template\x18\x1f \x01(\t\x12\x14\n\x0csite_reports\x18 \x01(\t\x12\x1f\n\x17run_data_retention_days\x18! 
\x01(\x02\x12.\n\ninterfaces\x18% \x03(\x0b\x32\x1a.DaqConfig.InterfacesEntry\x12/\n\x0b\x66\x61il_module\x18/ \x03(\x0b\x32\x1a.DaqConfig.FailModuleEntry\x12\x1d\n\x15port_flap_timeout_sec\x18\x30 \x01(\x05\x12\x1c\n\tusi_setup\x18\x31 \x01(\x0b\x32\t.USISetup\x1a=\n\x0fInterfacesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.Interface:\x02\x38\x01\x1a\x31\n\x0f\x46\x61ilModuleEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x17\n\x08USISetup\x12\x0b\n\x03url\x18\x01 \x01(\t\"\xe2\x01\n\x0bSwitchSetup\x12\x11\n\tctrl_intf\x18\t \x01(\t\x12\x0f\n\x07ip_addr\x18\x0b \x01(\t\x12\x13\n\x0buplink_port\x18\r \x01(\x05\x12\x0f\n\x07lo_port\x18\x0e \x01(\x05\x12\x0f\n\x07lo_addr\x18\x0f \x01(\t\x12\x11\n\tmods_addr\x18\x10 \x01(\t\x12\x0f\n\x07of_dpid\x18) \x01(\t\x12\x11\n\tdata_intf\x18* \x01(\t\x12\x0e\n\x06\x65xt_br\x18+ \x01(\t\x12\r\n\x05model\x18, \x01(\t\x12\x10\n\x08username\x18- \x01(\t\x12\x10\n\x08password\x18. 
\x01(\t\"\'\n\tInterface\x12\x0c\n\x04opts\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\x62\x06proto3') ) @@ -34,7 +35,7 @@ _descriptor.FieldDescriptor( name='key', full_name='DaqConfig.InterfacesEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -51,14 +52,14 @@ nested_types=[], enum_types=[ ], - serialized_options=b'8\001', + serialized_options=_b('8\001'), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], - serialized_start=941, - serialized_end=1002, + serialized_start=971, + serialized_end=1032, ) _DAQCONFIG_FAILMODULEENTRY = _descriptor.Descriptor( @@ -71,14 +72,14 @@ _descriptor.FieldDescriptor( name='key', full_name='DaqConfig.FailModuleEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='DaqConfig.FailModuleEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -88,14 +89,14 @@ nested_types=[], enum_types=[ ], - serialized_options=b'8\001', + serialized_options=_b('8\001'), is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], - serialized_start=1004, - serialized_end=1053, + serialized_start=1034, + serialized_end=1083, ) _DAQCONFIG = 
_descriptor.Descriptor( @@ -108,7 +109,7 @@ _descriptor.FieldDescriptor( name='site_description', full_name='DaqConfig.site_description', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -136,28 +137,28 @@ _descriptor.FieldDescriptor( name='base_conf', full_name='DaqConfig.base_conf', index=4, number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='site_path', full_name='DaqConfig.site_path', index=5, number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='initial_dhcp_lease_time', full_name='DaqConfig.initial_dhcp_lease_time', index=6, number=6, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dhcp_lease_time', full_name='DaqConfig.dhcp_lease_time', index=7, number=7, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -185,7 +186,7 @@ _descriptor.FieldDescriptor( name='host_tests', full_name='DaqConfig.host_tests', index=11, number=16, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -241,63 +242,63 @@ _descriptor.FieldDescriptor( name='daq_loglevel', full_name='DaqConfig.daq_loglevel', index=19, number=21, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='mininet_loglevel', full_name='DaqConfig.mininet_loglevel', index=20, number=22, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='finish_hook', full_name='DaqConfig.finish_hook', index=21, number=35, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='gcp_cred', full_name='DaqConfig.gcp_cred', index=22, number=23, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, 
default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='gcp_topic', full_name='DaqConfig.gcp_topic', index=23, number=24, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='schema_path', full_name='DaqConfig.schema_path', index=24, number=25, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='mud_files', full_name='DaqConfig.mud_files', index=25, number=26, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='device_specs', full_name='DaqConfig.device_specs', index=26, number=27, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='test_config', full_name='DaqConfig.test_config', index=27, number=28, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + 
has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -311,21 +312,21 @@ _descriptor.FieldDescriptor( name='fail_hook', full_name='DaqConfig.fail_hook', index=29, number=30, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='device_template', full_name='DaqConfig.device_template', index=30, number=31, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='site_reports', full_name='DaqConfig.site_reports', index=31, number=32, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -357,6 +358,13 @@ message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='usi_setup', full_name='DaqConfig.usi_setup', index=36, + number=49, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -370,7 +378,38 @@ oneofs=[ ], 
serialized_start=34, - serialized_end=1053, + serialized_end=1083, +) + + +_USISETUP = _descriptor.Descriptor( + name='USISetup', + full_name='USISetup', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='url', full_name='USISetup.url', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1085, + serialized_end=1108, ) @@ -384,14 +423,14 @@ _descriptor.FieldDescriptor( name='ctrl_intf', full_name='SwitchSetup.ctrl_intf', index=0, number=9, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='ip_addr', full_name='SwitchSetup.ip_addr', index=1, number=11, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -412,56 +451,56 @@ _descriptor.FieldDescriptor( name='lo_addr', full_name='SwitchSetup.lo_addr', index=4, number=15, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, 
file=DESCRIPTOR), _descriptor.FieldDescriptor( name='mods_addr', full_name='SwitchSetup.mods_addr', index=5, number=16, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='of_dpid', full_name='SwitchSetup.of_dpid', index=6, number=41, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data_intf', full_name='SwitchSetup.data_intf', index=7, number=42, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='ext_br', full_name='SwitchSetup.ext_br', index=8, number=43, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='model', full_name='SwitchSetup.model', index=9, number=44, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), 
_descriptor.FieldDescriptor( name='username', full_name='SwitchSetup.username', index=10, number=45, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='password', full_name='SwitchSetup.password', index=11, number=46, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -477,8 +516,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1056, - serialized_end=1282, + serialized_start=1111, + serialized_end=1337, ) @@ -492,7 +531,7 @@ _descriptor.FieldDescriptor( name='opts', full_name='Interface.opts', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -515,8 +554,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1284, - serialized_end=1323, + serialized_start=1339, + serialized_end=1378, ) _DAQCONFIG_INTERFACESENTRY.fields_by_name['value'].message_type = _INTERFACE @@ -525,46 +564,55 @@ _DAQCONFIG.fields_by_name['switch_setup'].message_type = _SWITCHSETUP _DAQCONFIG.fields_by_name['interfaces'].message_type = _DAQCONFIG_INTERFACESENTRY _DAQCONFIG.fields_by_name['fail_module'].message_type = _DAQCONFIG_FAILMODULEENTRY +_DAQCONFIG.fields_by_name['usi_setup'].message_type = _USISETUP DESCRIPTOR.message_types_by_name['DaqConfig'] = _DAQCONFIG 
+DESCRIPTOR.message_types_by_name['USISetup'] = _USISETUP DESCRIPTOR.message_types_by_name['SwitchSetup'] = _SWITCHSETUP DESCRIPTOR.message_types_by_name['Interface'] = _INTERFACE _sym_db.RegisterFileDescriptor(DESCRIPTOR) -DaqConfig = _reflection.GeneratedProtocolMessageType('DaqConfig', (_message.Message,), { +DaqConfig = _reflection.GeneratedProtocolMessageType('DaqConfig', (_message.Message,), dict( - 'InterfacesEntry' : _reflection.GeneratedProtocolMessageType('InterfacesEntry', (_message.Message,), { - 'DESCRIPTOR' : _DAQCONFIG_INTERFACESENTRY, - '__module__' : 'daq.proto.system_config_pb2' + InterfacesEntry = _reflection.GeneratedProtocolMessageType('InterfacesEntry', (_message.Message,), dict( + DESCRIPTOR = _DAQCONFIG_INTERFACESENTRY, + __module__ = 'daq.proto.system_config_pb2' # @@protoc_insertion_point(class_scope:DaqConfig.InterfacesEntry) - }) + )) , - 'FailModuleEntry' : _reflection.GeneratedProtocolMessageType('FailModuleEntry', (_message.Message,), { - 'DESCRIPTOR' : _DAQCONFIG_FAILMODULEENTRY, - '__module__' : 'daq.proto.system_config_pb2' + FailModuleEntry = _reflection.GeneratedProtocolMessageType('FailModuleEntry', (_message.Message,), dict( + DESCRIPTOR = _DAQCONFIG_FAILMODULEENTRY, + __module__ = 'daq.proto.system_config_pb2' # @@protoc_insertion_point(class_scope:DaqConfig.FailModuleEntry) - }) + )) , - 'DESCRIPTOR' : _DAQCONFIG, - '__module__' : 'daq.proto.system_config_pb2' + DESCRIPTOR = _DAQCONFIG, + __module__ = 'daq.proto.system_config_pb2' # @@protoc_insertion_point(class_scope:DaqConfig) - }) + )) _sym_db.RegisterMessage(DaqConfig) _sym_db.RegisterMessage(DaqConfig.InterfacesEntry) _sym_db.RegisterMessage(DaqConfig.FailModuleEntry) -SwitchSetup = _reflection.GeneratedProtocolMessageType('SwitchSetup', (_message.Message,), { - 'DESCRIPTOR' : _SWITCHSETUP, - '__module__' : 'daq.proto.system_config_pb2' +USISetup = _reflection.GeneratedProtocolMessageType('USISetup', (_message.Message,), dict( + DESCRIPTOR = _USISETUP, + __module__ = 
'daq.proto.system_config_pb2' + # @@protoc_insertion_point(class_scope:USISetup) + )) +_sym_db.RegisterMessage(USISetup) + +SwitchSetup = _reflection.GeneratedProtocolMessageType('SwitchSetup', (_message.Message,), dict( + DESCRIPTOR = _SWITCHSETUP, + __module__ = 'daq.proto.system_config_pb2' # @@protoc_insertion_point(class_scope:SwitchSetup) - }) + )) _sym_db.RegisterMessage(SwitchSetup) -Interface = _reflection.GeneratedProtocolMessageType('Interface', (_message.Message,), { - 'DESCRIPTOR' : _INTERFACE, - '__module__' : 'daq.proto.system_config_pb2' +Interface = _reflection.GeneratedProtocolMessageType('Interface', (_message.Message,), dict( + DESCRIPTOR = _INTERFACE, + __module__ = 'daq.proto.system_config_pb2' # @@protoc_insertion_point(class_scope:Interface) - }) + )) _sym_db.RegisterMessage(Interface) diff --git a/libs/proto/usi_pb2.py b/libs/proto/usi_pb2.py new file mode 100644 index 0000000000..c9189dc119 --- /dev/null +++ b/libs/proto/usi_pb2.py @@ -0,0 +1,449 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: usi.proto + +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='usi.proto', + package='usi', + syntax='proto3', + serialized_options=b'\n\004grpcB\010USIProtoP\001', + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n\tusi.proto\x12\x03usi\"\'\n\x14SwitchActionResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"\x9b\x01\n\rPowerResponse\x12!\n\x19\x63urrent_power_consumption\x18\x01 \x01(\x02\x12\x1d\n\x15max_power_consumption\x18\x02 \x01(\x02\x12$\n\x0bpoe_support\x18\x03 \x01(\x0e\x32\x0f.usi.POESupport\x12\"\n\npoe_status\x18\x04 \x01(\x0e\x32\x0e.usi.POEStatus\"]\n\x11InterfaceResponse\x12$\n\x0blink_status\x18\x01 \x01(\x0e\x32\x0f.usi.LinkStatus\x12\x12\n\nlink_speed\x18\x02 \x01(\x05\x12\x0e\n\x06\x64uplex\x18\x03 \x01(\t\"w\n\nSwitchInfo\x12\x0f\n\x07ip_addr\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65vice_port\x18\x03 \x01(\x05\x12\x1f\n\x05model\x18\x04 \x01(\x0e\x32\x10.usi.SwitchModel\x12\x10\n\x08username\x18\x05 \x01(\t\x12\x10\n\x08password\x18\x06 
\x01(\t*F\n\x0bSwitchModel\x12\x17\n\x13\x41LLIED_TELESIS_X230\x10\x00\x12\x0e\n\nCISCO_9300\x10\x01\x12\x0e\n\nOVS_SWITCH\x10\x02*\x1e\n\nLinkStatus\x12\x06\n\x02UP\x10\x00\x12\x08\n\x04\x44OWN\x10\x01*\'\n\nPOESupport\x12\x0b\n\x07\x45NABLED\x10\x00\x12\x0c\n\x08\x44ISABLED\x10\x01*1\n\tPOEStatus\x12\x06\n\x02ON\x10\x00\x12\x07\n\x03OFF\x10\x01\x12\t\n\x05\x46\x41ULT\x10\x02\x12\x08\n\x04\x44\x45NY\x10\x03\x32\xef\x01\n\nUSIService\x12\x31\n\x08GetPower\x12\x0f.usi.SwitchInfo\x1a\x12.usi.PowerResponse\"\x00\x12\x39\n\x0cGetInterface\x12\x0f.usi.SwitchInfo\x1a\x16.usi.InterfaceResponse\"\x00\x12:\n\ndisconnect\x12\x0f.usi.SwitchInfo\x1a\x19.usi.SwitchActionResponse\"\x00\x12\x37\n\x07\x63onnect\x12\x0f.usi.SwitchInfo\x1a\x19.usi.SwitchActionResponse\"\x00\x42\x12\n\x04grpcB\x08USIProtoP\x01\x62\x06proto3' +) + +_SWITCHMODEL = _descriptor.EnumDescriptor( + name='SwitchModel', + full_name='usi.SwitchModel', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='ALLIED_TELESIS_X230', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='CISCO_9300', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='OVS_SWITCH', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=433, + serialized_end=503, +) +_sym_db.RegisterEnumDescriptor(_SWITCHMODEL) + +SwitchModel = enum_type_wrapper.EnumTypeWrapper(_SWITCHMODEL) +_LINKSTATUS = _descriptor.EnumDescriptor( + name='LinkStatus', + full_name='usi.LinkStatus', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='UP', index=0, number=0, + 
serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='DOWN', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=505, + serialized_end=535, +) +_sym_db.RegisterEnumDescriptor(_LINKSTATUS) + +LinkStatus = enum_type_wrapper.EnumTypeWrapper(_LINKSTATUS) +_POESUPPORT = _descriptor.EnumDescriptor( + name='POESupport', + full_name='usi.POESupport', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='ENABLED', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='DISABLED', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=537, + serialized_end=576, +) +_sym_db.RegisterEnumDescriptor(_POESUPPORT) + +POESupport = enum_type_wrapper.EnumTypeWrapper(_POESUPPORT) +_POESTATUS = _descriptor.EnumDescriptor( + name='POEStatus', + full_name='usi.POEStatus', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='ON', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='OFF', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='FAULT', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='DENY', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + 
containing_type=None, + serialized_options=None, + serialized_start=578, + serialized_end=627, +) +_sym_db.RegisterEnumDescriptor(_POESTATUS) + +POEStatus = enum_type_wrapper.EnumTypeWrapper(_POESTATUS) +ALLIED_TELESIS_X230 = 0 +CISCO_9300 = 1 +OVS_SWITCH = 2 +UP = 0 +DOWN = 1 +ENABLED = 0 +DISABLED = 1 +ON = 0 +OFF = 1 +FAULT = 2 +DENY = 3 + + + +_SWITCHACTIONRESPONSE = _descriptor.Descriptor( + name='SwitchActionResponse', + full_name='usi.SwitchActionResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='success', full_name='usi.SwitchActionResponse.success', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=18, + serialized_end=57, +) + + +_POWERRESPONSE = _descriptor.Descriptor( + name='PowerResponse', + full_name='usi.PowerResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='current_power_consumption', full_name='usi.PowerResponse.current_power_consumption', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='max_power_consumption', full_name='usi.PowerResponse.max_power_consumption', index=1, + number=2, type=2, cpp_type=6, label=1, + 
has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='poe_support', full_name='usi.PowerResponse.poe_support', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='poe_status', full_name='usi.PowerResponse.poe_status', index=3, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=60, + serialized_end=215, +) + + +_INTERFACERESPONSE = _descriptor.Descriptor( + name='InterfaceResponse', + full_name='usi.InterfaceResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='link_status', full_name='usi.InterfaceResponse.link_status', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='link_speed', full_name='usi.InterfaceResponse.link_speed', index=1, + number=2, type=5, cpp_type=1, label=1, + 
has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='duplex', full_name='usi.InterfaceResponse.duplex', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=217, + serialized_end=310, +) + + +_SWITCHINFO = _descriptor.Descriptor( + name='SwitchInfo', + full_name='usi.SwitchInfo', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='ip_addr', full_name='usi.SwitchInfo.ip_addr', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='device_port', full_name='usi.SwitchInfo.device_port', index=1, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='model', full_name='usi.SwitchInfo.model', index=2, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, 
default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='username', full_name='usi.SwitchInfo.username', index=3, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='password', full_name='usi.SwitchInfo.password', index=4, + number=6, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=312, + serialized_end=431, +) + +_POWERRESPONSE.fields_by_name['poe_support'].enum_type = _POESUPPORT +_POWERRESPONSE.fields_by_name['poe_status'].enum_type = _POESTATUS +_INTERFACERESPONSE.fields_by_name['link_status'].enum_type = _LINKSTATUS +_SWITCHINFO.fields_by_name['model'].enum_type = _SWITCHMODEL +DESCRIPTOR.message_types_by_name['SwitchActionResponse'] = _SWITCHACTIONRESPONSE +DESCRIPTOR.message_types_by_name['PowerResponse'] = _POWERRESPONSE +DESCRIPTOR.message_types_by_name['InterfaceResponse'] = _INTERFACERESPONSE +DESCRIPTOR.message_types_by_name['SwitchInfo'] = _SWITCHINFO +DESCRIPTOR.enum_types_by_name['SwitchModel'] = _SWITCHMODEL +DESCRIPTOR.enum_types_by_name['LinkStatus'] = _LINKSTATUS +DESCRIPTOR.enum_types_by_name['POESupport'] = _POESUPPORT 
+DESCRIPTOR.enum_types_by_name['POEStatus'] = _POESTATUS +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +SwitchActionResponse = _reflection.GeneratedProtocolMessageType('SwitchActionResponse', (_message.Message,), { + 'DESCRIPTOR' : _SWITCHACTIONRESPONSE, + '__module__' : 'usi_pb2' + # @@protoc_insertion_point(class_scope:usi.SwitchActionResponse) + }) +_sym_db.RegisterMessage(SwitchActionResponse) + +PowerResponse = _reflection.GeneratedProtocolMessageType('PowerResponse', (_message.Message,), { + 'DESCRIPTOR' : _POWERRESPONSE, + '__module__' : 'usi_pb2' + # @@protoc_insertion_point(class_scope:usi.PowerResponse) + }) +_sym_db.RegisterMessage(PowerResponse) + +InterfaceResponse = _reflection.GeneratedProtocolMessageType('InterfaceResponse', (_message.Message,), { + 'DESCRIPTOR' : _INTERFACERESPONSE, + '__module__' : 'usi_pb2' + # @@protoc_insertion_point(class_scope:usi.InterfaceResponse) + }) +_sym_db.RegisterMessage(InterfaceResponse) + +SwitchInfo = _reflection.GeneratedProtocolMessageType('SwitchInfo', (_message.Message,), { + 'DESCRIPTOR' : _SWITCHINFO, + '__module__' : 'usi_pb2' + # @@protoc_insertion_point(class_scope:usi.SwitchInfo) + }) +_sym_db.RegisterMessage(SwitchInfo) + + +DESCRIPTOR._options = None + +_USISERVICE = _descriptor.ServiceDescriptor( + name='USIService', + full_name='usi.USIService', + file=DESCRIPTOR, + index=0, + serialized_options=None, + create_key=_descriptor._internal_create_key, + serialized_start=630, + serialized_end=869, + methods=[ + _descriptor.MethodDescriptor( + name='GetPower', + full_name='usi.USIService.GetPower', + index=0, + containing_service=None, + input_type=_SWITCHINFO, + output_type=_POWERRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='GetInterface', + full_name='usi.USIService.GetInterface', + index=1, + containing_service=None, + input_type=_SWITCHINFO, + output_type=_INTERFACERESPONSE, + serialized_options=None, + 
create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='disconnect', + full_name='usi.USIService.disconnect', + index=2, + containing_service=None, + input_type=_SWITCHINFO, + output_type=_SWITCHACTIONRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='connect', + full_name='usi.USIService.connect', + index=3, + containing_service=None, + input_type=_SWITCHINFO, + output_type=_SWITCHACTIONRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), +]) +_sym_db.RegisterServiceDescriptor(_USISERVICE) + +DESCRIPTOR.services_by_name['USIService'] = _USISERVICE + +# @@protoc_insertion_point(module_scope) diff --git a/libs/proto/usi_pb2_grpc.py b/libs/proto/usi_pb2_grpc.py new file mode 100644 index 0000000000..c8e57501c9 --- /dev/null +++ b/libs/proto/usi_pb2_grpc.py @@ -0,0 +1,161 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +import usi_pb2 as usi__pb2 + + +class USIServiceStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.GetPower = channel.unary_unary( + '/usi.USIService/GetPower', + request_serializer=usi__pb2.SwitchInfo.SerializeToString, + response_deserializer=usi__pb2.PowerResponse.FromString, + ) + self.GetInterface = channel.unary_unary( + '/usi.USIService/GetInterface', + request_serializer=usi__pb2.SwitchInfo.SerializeToString, + response_deserializer=usi__pb2.InterfaceResponse.FromString, + ) + self.disconnect = channel.unary_unary( + '/usi.USIService/disconnect', + request_serializer=usi__pb2.SwitchInfo.SerializeToString, + response_deserializer=usi__pb2.SwitchActionResponse.FromString, + ) + self.connect = channel.unary_unary( + '/usi.USIService/connect', + request_serializer=usi__pb2.SwitchInfo.SerializeToString, + response_deserializer=usi__pb2.SwitchActionResponse.FromString, + ) + + +class USIServiceServicer(object): + """Missing associated documentation comment in .proto file.""" + + def GetPower(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetInterface(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def disconnect(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def connect(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def 
add_USIServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetPower': grpc.unary_unary_rpc_method_handler( + servicer.GetPower, + request_deserializer=usi__pb2.SwitchInfo.FromString, + response_serializer=usi__pb2.PowerResponse.SerializeToString, + ), + 'GetInterface': grpc.unary_unary_rpc_method_handler( + servicer.GetInterface, + request_deserializer=usi__pb2.SwitchInfo.FromString, + response_serializer=usi__pb2.InterfaceResponse.SerializeToString, + ), + 'disconnect': grpc.unary_unary_rpc_method_handler( + servicer.disconnect, + request_deserializer=usi__pb2.SwitchInfo.FromString, + response_serializer=usi__pb2.SwitchActionResponse.SerializeToString, + ), + 'connect': grpc.unary_unary_rpc_method_handler( + servicer.connect, + request_deserializer=usi__pb2.SwitchInfo.FromString, + response_serializer=usi__pb2.SwitchActionResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'usi.USIService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. 
+class USIService(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def GetPower(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/usi.USIService/GetPower', + usi__pb2.SwitchInfo.SerializeToString, + usi__pb2.PowerResponse.FromString, + options, channel_credentials, + call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetInterface(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/usi.USIService/GetInterface', + usi__pb2.SwitchInfo.SerializeToString, + usi__pb2.InterfaceResponse.FromString, + options, channel_credentials, + call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def disconnect(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/usi.USIService/disconnect', + usi__pb2.SwitchInfo.SerializeToString, + usi__pb2.SwitchActionResponse.FromString, + options, channel_credentials, + call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def connect(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/usi.USIService/connect', + usi__pb2.SwitchInfo.SerializeToString, + usi__pb2.SwitchActionResponse.FromString, + options, channel_credentials, + call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/proto/system_config.proto 
b/proto/system_config.proto index a0cbfbda48..1566620515 100644 --- a/proto/system_config.proto +++ b/proto/system_config.proto @@ -114,8 +114,17 @@ message DaqConfig { // Set time between port disconnect and host tests shutdown int32 port_flap_timeout_sec = 48; + + // USI url + USISetup usi_setup = 49; } +/** + * USI paramters +**/ +message USISetup { + string url = 1; +} /* * System configuraiton of the access switch. This is used by the system diff --git a/testing/run_unit_tests.sh b/testing/run_unit_tests.sh index b82892e384..a0705015a3 100755 --- a/testing/run_unit_tests.sh +++ b/testing/run_unit_tests.sh @@ -10,7 +10,7 @@ source venv/bin/activate coverage erase -export PYTHONPATH=$BASEDIR/daq:$BASEDIR/mininet:$BASEDIR/faucet:$BASEDIR/forch:$BASEDIR/bin/python +export PYTHONPATH=$BASEDIR/daq:$BASEDIR/mininet:$BASEDIR/faucet:$BASEDIR/forch:$BASEDIR/bin/python:$BASEDIR/libs:$BASEDIR/libs/proto coverage run \ --source $BASEDIR/daq,$BASEDIR/bin/python/ \ -m unittest discover \ diff --git a/usi/Dockerfile.usi b/usi/Dockerfile.usi index ba63658021..4fb6601310 100644 --- a/usi/Dockerfile.usi +++ b/usi/Dockerfile.usi @@ -1,9 +1,8 @@ FROM daqf/aardvark:latest # Do this alone first so it can be re-used by other build files. 
-RUN $AG update && $AG install openjdk-9-jre -RUN $AG update && $AG install openjdk-9-jdk git +RUN $AG update && $AG install openjdk-11-jdk git maven COPY usi/ usi/ diff --git a/usi/build.conf b/usi/build.conf new file mode 100644 index 0000000000..d469dd0503 --- /dev/null +++ b/usi/build.conf @@ -0,0 +1,2 @@ +build usi +add usi diff --git a/usi/src/main/java/daq/usi/UsiImpl.java b/usi/src/main/java/daq/usi/UsiImpl.java index 6cda498025..840bfe3e90 100644 --- a/usi/src/main/java/daq/usi/UsiImpl.java +++ b/usi/src/main/java/daq/usi/UsiImpl.java @@ -24,9 +24,8 @@ private SwitchController createController(SwitchInfo switchInfo) { SwitchController newController; switch (switchInfo.getModel()) { case ALLIED_TELESIS_X230: { - newController = - new AlliedTelesisX230(switchInfo.getIpAddr(), switchInfo.getUsername(), - switchInfo.getPassword()); + newController = new AlliedTelesisX230(switchInfo.getIpAddr(), switchInfo.getUsername(), + switchInfo.getPassword()); break; } case CISCO_9300: { @@ -39,16 +38,15 @@ private SwitchController createController(SwitchInfo switchInfo) { break; } default: - throw new IllegalArgumentException("Unrecognized switch model " - + switchInfo.getModel()); + throw new IllegalArgumentException("Unrecognized switch model " + switchInfo.getModel()); } newController.start(); return newController; } private SwitchController getSwitchController(SwitchInfo switchInfo) { - String repr = String.join(",", switchInfo.getModel().toString(), - switchInfo.getIpAddr(), switchInfo.getUsername(), + String repr = String.join(",", switchInfo.getModel().toString(), switchInfo.getIpAddr(), + switchInfo.getUsername(), switchInfo.getPassword()); return switchControllers.computeIfAbsent(repr, key -> createController(switchInfo)); } @@ -57,7 +55,10 @@ private SwitchController getSwitchController(SwitchInfo switchInfo) { public void getPower(SwitchInfo request, StreamObserver responseObserver) { SwitchController sc = getSwitchController(request); try { - 
sc.getPower(request.getDevicePort(), responseObserver::onNext); + sc.getPower(request.getDevicePort(), data -> { + responseObserver.onNext(data); + responseObserver.onCompleted(); + }); } catch (Exception e) { e.printStackTrace(); responseObserver.onError(e); @@ -65,11 +66,13 @@ public void getPower(SwitchInfo request, StreamObserver responseO } @Override - public void getInterface(SwitchInfo request, - StreamObserver responseObserver) { + public void getInterface(SwitchInfo request, StreamObserver responseObserver) { SwitchController sc = getSwitchController(request); try { - sc.getInterface(request.getDevicePort(), responseObserver::onNext); + sc.getInterface(request.getDevicePort(), data -> { + responseObserver.onNext(data); + responseObserver.onCompleted(); + }); } catch (Exception e) { e.printStackTrace(); responseObserver.onError(e); @@ -80,7 +83,10 @@ public void getInterface(SwitchInfo request, public void connect(SwitchInfo request, StreamObserver responseObserver) { SwitchController sc = getSwitchController(request); try { - sc.connect(request.getDevicePort(), responseObserver::onNext); + sc.connect(request.getDevicePort(), data -> { + responseObserver.onNext(data); + responseObserver.onCompleted(); + }); } catch (Exception e) { e.printStackTrace(); responseObserver.onError(e); @@ -92,10 +98,13 @@ public void disconnect(SwitchInfo request, StreamObserver responseObserver) { SwitchController sc = getSwitchController(request); try { - sc.disconnect(request.getDevicePort(), responseObserver::onNext); + sc.disconnect(request.getDevicePort(), data -> { + responseObserver.onNext(data); + responseObserver.onCompleted(); + }); } catch (Exception e) { e.printStackTrace(); responseObserver.onError(e); } } -} \ No newline at end of file +} diff --git a/usi/src/main/java/daq/usi/ovs/OpenVSwitch.java b/usi/src/main/java/daq/usi/ovs/OpenVSwitch.java index 691a04eeb5..ba38631e7e 100644 --- a/usi/src/main/java/daq/usi/ovs/OpenVSwitch.java +++ 
b/usi/src/main/java/daq/usi/ovs/OpenVSwitch.java @@ -11,7 +11,9 @@ import java.io.BufferedReader; import java.io.FileNotFoundException; import java.io.FileReader; +import java.io.IOException; import java.net.URL; +import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -19,31 +21,29 @@ public class OpenVSwitch implements SwitchController { private static final String OVS_OUTPUT_FILE = "ovs_output.txt"; - protected String getInterfaceByPort(int devicePort) throws FileNotFoundException { + protected String getInterfaceByPort(int devicePort) throws IOException { URL file = OpenVSwitch.class.getClassLoader().getResource(OVS_OUTPUT_FILE); if (file == null) { throw new FileNotFoundException(OVS_OUTPUT_FILE + " is not found!"); } FileReader reader = new FileReader(file.getFile()); - BufferedReader bufferedReader = new BufferedReader(reader); - Pattern pattern = Pattern.compile("(^\\s*" + devicePort + ")(\\((.+)\\))(:.*)", 'g'); - String interfaceLine = bufferedReader.lines().filter(line -> { - Matcher m = pattern.matcher(line); - return m.find(); - }).findFirst().get(); - - Matcher m = pattern.matcher(interfaceLine); - m.matches(); - return m.group(3); + try (BufferedReader bufferedReader = new BufferedReader(reader)) { + Pattern pattern = Pattern.compile("(^\\s*" + devicePort + ")(\\((.+)\\))(:.*)", 'g'); + String interfaceLine = bufferedReader.lines().filter(line -> { + Matcher m = pattern.matcher(line); + return m.find(); + }).findFirst().get(); + Matcher m = pattern.matcher(interfaceLine); + m.matches(); + return m.group(3); + } } @Override public void getPower(int devicePort, ResponseHandler handler) throws Exception { PowerResponse.Builder response = PowerResponse.newBuilder(); - PowerResponse power = response.setPoeStatus(POEStatus.OFF) - .setPoeSupport(POESupport.DISABLED) - .setMaxPowerConsumption(0) - .setCurrentPowerConsumption(0).build(); + PowerResponse power = 
response.setPoeStatus(POEStatus.OFF).setPoeSupport(POESupport.DISABLED) + .setMaxPowerConsumption(0).setCurrentPowerConsumption(0).build(); handler.receiveData(power); } @@ -51,21 +51,23 @@ public void getPower(int devicePort, ResponseHandler handler) thr public void getInterface(int devicePort, ResponseHandler handler) throws Exception { InterfaceResponse.Builder response = InterfaceResponse.newBuilder(); - InterfaceResponse iface = response.setLinkStatus(LinkStatus.UP) - .setDuplex("") - .setLinkSpeed(0) - .build(); + InterfaceResponse iface = + response.setLinkStatus(LinkStatus.UP).setDuplex("").setLinkSpeed(0).build(); handler.receiveData(iface); } private void managePort(int devicePort, ResponseHandler handler, - boolean enabled) throws Exception { + boolean enabled) + throws Exception { String iface = getInterfaceByPort(devicePort); ProcessBuilder processBuilder = new ProcessBuilder(); - processBuilder.command("bash", "-c", "ifconfig " + iface + (enabled ? " up" : " down")); + processBuilder.command("bash", "-c", "ifconfig " + iface + (enabled ? 
" up" : " down")) + .inheritIO(); Process process = processBuilder.start(); - int exitCode = process.waitFor(); - handler.receiveData(SwitchActionResponse.newBuilder().setSuccess(exitCode == 0).build()); + boolean exited = process.waitFor(10, TimeUnit.SECONDS); + int exitCode = process.exitValue(); + handler + .receiveData(SwitchActionResponse.newBuilder().setSuccess(exited && exitCode == 0).build()); } @Override diff --git a/usi/start b/usi/start index 4c8350ff73..c3fc5e5b8a 100755 --- a/usi/start +++ b/usi/start @@ -1,2 +1,2 @@ #!/bin/bash -e -java -jar usi/target/usi-0.0.1-jar-with-dependencies.jar +java -cp /ovs:usi/target/usi-0.0.1-jar-with-dependencies.jar daq.usi.UsiServer From 9a95b36d648d1569287be3da1355eaa8a875241a Mon Sep 17 00:00:00 2001 From: pbatta Date: Fri, 10 Jul 2020 15:09:28 -0700 Subject: [PATCH 20/38] Update troubleshooting doc (#528) --- docs/troubleshooting.md | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 4a8fa1ba09..32e20582bd 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -9,7 +9,7 @@ mailing list, and use it as the primary source of troubleshooting. email somebody directly, but will likely result in a slower response time. * The `inst/cmdrun.log` file contains a copy of the console output from DAQ. * This file should be attached to communications about resolving DAQ issues. - * It's not necessary to include any assocaited `local/system.yaml` file, since the + * It's not necessary to include any associated `local/system.yaml` file, since the contents of that are already included. * Make sure everything is running properly using the internal simulation setup before tackling anything to do with external switches or physical devices. @@ -29,12 +29,15 @@ a summary of all test results. * The determination of _PASS_ vs. _FAIL_ is one of policy, not a technical consideration. 
If the question is "Is it OK if this tests fails or not?" then you need to contact whomever is responsible for policy, not DAQ-proper. - * The reports are _optionally_ available trough the _optionally_ configured + * The reports are _optionally_ available through the _optionally_ configured GCP instance, but that's only relevant after the basics are working. -* Capturing a complete zip of the `inst/` directory should encompass all the -state neesary to diagnose/debug problems, so simply captuing that and sending -it along would be sufficient in most cases. Be wary of file size, as `inst/` -can collect cruft over time and occasionally need to be cleaned. +* Running `bin/techsupport.sh` will create a zipped techsupport file that + contains all configuration, packet captures and runtime logs of a run. + Sending that file is sufficient in most cases. Be wary of file + size, as `inst/` might have large pcap files or older files that can be + trimmed to get more manageable file sizes for email attachments. +* Unless you are developing for DAQ and want the latest code, ensure that you + are on the latest stable software version tracked by the git tag `release_stable`. ## Test-Specific @@ -82,4 +85,4 @@ directory. * Filter results for the device's MAC address with something like: tcpdump -en -r testing.pacp ether host de:vi:ce:ma:ca:dr. * There is no one-size-fits-all guidance here, because what is expected is - extremeley test-specific. + extremely test-specific. 
From 02f1fee5f51ac2a93d9b9d7f5cbf52364ee1258b Mon Sep 17 00:00:00 2001 From: pbatta Date: Mon, 13 Jul 2020 11:33:32 -0700 Subject: [PATCH 21/38] Add troubleshooting script (#529) --- bin/troubleshoot | 29 +++++++++++++++++++++++++++++ docs/troubleshooting.md | 2 ++ 2 files changed, 31 insertions(+) create mode 100755 bin/troubleshoot diff --git a/bin/troubleshoot b/bin/troubleshoot new file mode 100755 index 0000000000..06b3432bd3 --- /dev/null +++ b/bin/troubleshoot @@ -0,0 +1,29 @@ +#!/bin/bash + +ROOT=$(realpath $(dirname $0)/..) +cd $ROOT + +if [ ! -d inst ]; then + echo "Error: run this script after a test run completes" + exit 1 +fi + +# After the system settles (early on some dpid=1 messages are expected) if we see +# unknown dpid in faucet log, dpid might be misconfigured +unknown_dpid=`fgrep 'unknown datapath' inst/faucet.log | wc -l` +if [ "$unknown_dpid" -gt 20 ]; then + echo "Error: Faucet reports unknown datapath DPID:" + fgrep 'unknown datapath' inst/faucet.log | tail -n1 + echo "Check if switch_setup:of_dpid in config matches the DPID on the physical switch" +else + echo "Checking DPID misconfig: ok" +fi + +# If the switch test failed with a monitoring timeout, switch login info could be wrong +switch_timeout=`fgrep 'Monitoring timeout for switch' inst/cmdrun.log` +if [ -n "$switch_timeout" ]; then + echo "Error: Timeout connecting to physical switch" + echo "Check switch username/password configuration" +else + echo "Checking Switch timeout: ok" +fi diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 32e20582bd..05aca2c9d6 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -38,6 +38,8 @@ a summary of all test results. trimmed to get more manageable file sizes for email attachments. * Unless you are developing for DAQ and want the latest code, ensure that you are on the latest stable software version tracked by the git tag `release_stable`. 
+* If a test run blocks or errors out, try running `bin/troubleshoot` to detect + some common misconfiguration and setup related issues. ## Test-Specific From ca5a7f6db66951f1287f5912eb5d66c78d8fa58f Mon Sep 17 00:00:00 2001 From: Trevor Date: Mon, 13 Jul 2020 11:33:54 -0700 Subject: [PATCH 22/38] Remove deprecated topology files (#530) --- bin/setup_testing | 63 ------------------------ topology/alta-dev/faucet.yaml | 92 ----------------------------------- topology/alta-dev/gauge.yaml | 14 ------ topology/normalize.sh | 12 ----- topology/setup.json | 69 -------------------------- 5 files changed, 250 deletions(-) delete mode 100755 bin/setup_testing delete mode 100644 topology/alta-dev/faucet.yaml delete mode 100644 topology/alta-dev/gauge.yaml delete mode 100755 topology/normalize.sh delete mode 100644 topology/setup.json diff --git a/bin/setup_testing b/bin/setup_testing deleted file mode 100755 index f5e73ca8b1..0000000000 --- a/bin/setup_testing +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash -e - -ROOT=$(dirname $0)/.. 
-cd $ROOT - -bin/build_hash check - -TARGET_ROOT=inst/faucet/daq-faucet-faucet - -for postfix in 1 2; do - TARGET=${TARGET_ROOT}$postfix - echo Preparing $TARGET - sudo rm -rf $TARGET && mkdir -p $TARGET - cp topology/alta-dev/faucet.yaml $TARGET/faucet.yaml - cp topology/alta-dev/gauge.yaml $TARGET/gauge.yaml -done - -cmd/faucet faucet1 6655 -cmd/faucet gauge faucet1 6656 9306 -cmd/faucet faucet2 6657 -cmd/faucet gauge faucet2 6658 9308 - -sudo ip addr flush ganga -sudo ip addr add 192.0.2.10/24 dev ganga - -sudo ovs-vsctl --if-exists del-br upstream -- add-br upstream -sudo ip link del daqnw || true -sudo ip link add daqnw type veth peer name t1bond -sudo ip link set daqnw up -sudo ip link set t1bond up -sudo ovs-vsctl add-port upstream daqnw - -sudo ip link del up_bond || true -sudo ip link add up_bond type bond mode 802.3ad lacp_rate fast -sudo ip link set up_bond up -sudo ip link set yamuna down -sudo ip link set yamuna master up_bond -sudo ip link set beas down -sudo ip link set beas master up_bond -sudo ovs-vsctl add-port upstream up_bond - -cmd/faux -n :t1bond -cmd/faux :satlej -cmd/faux :ravi -cmd/faux :tapti - -echo -docker exec daq-networking-t1bond ip addr -echo Waiting for DHCP... -sleep 30 -echo -docker exec daq-faux-satlej ip addr show dev satlej -echo -docker exec daq-faux-ravi ip addr show dev ravi -echo -docker exec daq-faux-tapti ip addr show dev tapti -echo -docker exec daq-faux-satlej ping -c 3 google.com -docker exec daq-faux-ravi ping -c 3 google.com -docker exec daq-faux-tapti ping -c 3 google.com -docker exec daq-faux-satlej ping -c 3 daq-faux-tapti -echo -echo Done with testing setup. 
diff --git a/topology/alta-dev/faucet.yaml b/topology/alta-dev/faucet.yaml deleted file mode 100644 index 0dcd804b7e..0000000000 --- a/topology/alta-dev/faucet.yaml +++ /dev/null @@ -1,92 +0,0 @@ -dps: - us-mtv-900-t1sw2-0-1: - dp_id: 147058200621 - faucet_dp_mac: 0e:00:00:00:01:01 - hardware: GenericTFM - interfaces: - 9: - lldp_beacon: {enable: true} - lldp_peer_mac: 0e:00:00:00:02:01 - tagged_vlans: [171] - receive_lldp: true - 10: - lldp_beacon: {enable: true} - lldp_peer_mac: 0e:00:00:00:02:02 - tagged_vlans: [171] - receive_lldp: true - 28: - description: Juniper-Uplink-1 - lacp: 3 - lacp_passthrough: [9, 10] - lldp_beacon: {enable: true} - native_vlan: 171 - receive_lldp: true - lldp_beacon: {max_per_interval: 5, send_interval: 5} - use_hard_timeout: true - us-mtv-900-t1sw2-0-2: - dp_id: 147058200561 - faucet_dp_mac: 0e:00:00:00:01:02 - hardware: GenericTFM - interfaces: - 9: - lldp_beacon: {enable: true} - lldp_peer_mac: 0e:00:00:00:02:01 - tagged_vlans: [171] - receive_lldp: true - 10: - lldp_beacon: {enable: true} - lldp_peer_mac: 0e:00:00:00:02:02 - tagged_vlans: [171] - receive_lldp: true - 28: - description: Juniper-Uplink-2 - lacp: 3 - lacp_passthrough: [9, 10] - lldp_beacon: {enable: true} - native_vlan: 171 - receive_lldp: true - lldp_beacon: {max_per_interval: 5, send_interval: 5} - use_hard_timeout: true - us-mtv-900-t2sw2-0-1: - dp_id: 246406200719452 - faucet_dp_mac: 0e:00:00:00:02:01 - hardware: Allied-Telesis - interface_ranges: - 1-46: {description: IoT Host, native_vlan: 171} - interfaces: - 47: - lldp_beacon: {enable: true} - lldp_failover: 48 - loop_protect_external: true - tagged_vlans: [171] - receive_lldp: true - 48: - lldp_beacon: {enable: true} - loop_protect_external: true - tagged_vlans: [171] - receive_lldp: true - lldp_beacon: {max_per_interval: 5, send_interval: 5} - use_hard_timeout: true - us-mtv-900-t2sw2-0-2: - dp_id: 246406200719346 - faucet_dp_mac: 0e:00:00:00:02:02 - hardware: Allied-Telesis - interface_ranges: - 1-46: 
{description: IoT Host, native_vlan: 171} - interfaces: - 47: - lldp_beacon: {enable: true} - loop_protect_external: true - tagged_vlans: [171] - receive_lldp: true - 48: - lldp_beacon: {enable: true} - lldp_failover: 47 - loop_protect_external: true - tagged_vlans: [171] - receive_lldp: true - lldp_beacon: {max_per_interval: 5, send_interval: 5} - use_hard_timeout: true -version: 2 -vlans: - 171: {description: BOS-IOT} diff --git a/topology/alta-dev/gauge.yaml b/topology/alta-dev/gauge.yaml deleted file mode 100644 index bf0f0e0f1e..0000000000 --- a/topology/alta-dev/gauge.yaml +++ /dev/null @@ -1,14 +0,0 @@ -dbs: - prometheus: {prometheus_addr: 0.0.0.0, prometheus_port: 9303, type: prometheus} -faucet_configs: [/etc/faucet/faucet.yaml] -watchers: - flow_table: - db: prometheus - dps: [us-mtv-900-t1sw2-0-1, us-mtv-900-t2sw2-0-1, us-mtv-900-t1sw2-0-2, us-mtv-900-t2sw2-0-2] - interval: 10 - type: flow_table - port_stats: - db: prometheus - dps: [us-mtv-900-t1sw2-0-1, us-mtv-900-t2sw2-0-1, us-mtv-900-t1sw2-0-2, us-mtv-900-t2sw2-0-2] - interval: 10 - type: port_stats diff --git a/topology/normalize.sh b/topology/normalize.sh deleted file mode 100755 index 71fb4da312..0000000000 --- a/topology/normalize.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -e - -ROOT=$(dirname $0)/.. -if [ ! 
-d "$1" ]; then - echo $0 [topology dir] - false -fi - -TDIR=$(realpath $1) - -$ROOT/bin/generate_topology raw_topo=$TDIR topo_dir=$TDIR - diff --git a/topology/setup.json b/topology/setup.json deleted file mode 100644 index 7206b7cfdf..0000000000 --- a/topology/setup.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - 'faucet_yaml': '/etc/faucet/faucet.yaml', - 'faucet_dp_mac_format': '0e:00:00:00:%02x:%02x', - 'lacp_timeout': 5, - 'default_hardware': 'GenericTFM', - 'egress_description': 'egress', - 'combinatorial_port_flood': true, - 'naming': { - 'tier1': '-t1sw', - 'tier2': '-t2sw', - 'control': '-ctr' - }, - 'device_description': 'IoT Device', - 'vlan': { - 'description': 'Faucet IoT', - 'name': 'Faucet_IoT' - }, - 'gauge': { - 'db_type': 'prometheus', - 'interval': 10 - }, - 'db_types': { - 'prometheus': { - 'prometheus_addr': '0.0.0.0', - 'prometheus_port': 9303, - 'type': 'prometheus' - } - }, - 'receive_lldp': true, - 'switch_lldp_beacon': { - 'max_per_interval': 5, - 'send_interval': 5 - }, - 'port_lldp_beacon': { - 'enable': true - }, - 'loop_protect_external': true, - "pre_acls": [ - { - "description": "ICMP Allow", - "nw_proto": 1 - }, - { - "description": "ARP Allow", - "dl_type": "0x0806" - }, - { - "description": "DHCP Allow", - "udp_src": 68, - "udp_dst": 67 - }, - { - "description": "DNS Allow", - "udp_dst": 53 - }, - { - "description": "DHCP Broadcast", - "dl_dst": "ff:ff:ff:ff:ff:ff", - "udp_src": 68, - "udp_dst": 67 - } - ], - "post_acls": [ - { - "description": "Default Deny", - "allow": false - } - ] -} From 2361f80e4b53516af21fc06d70dd1cf5a31ca30c Mon Sep 17 00:00:00 2001 From: Trevor Date: Mon, 13 Jul 2020 16:01:05 -0700 Subject: [PATCH 23/38] Support for alternate sec switch (not managed by DAQ) (#531) --- .gitignore | 18 ++-- bin/alt_faucet | 24 +++++ bin/build_proto | 7 +- bin/external_ovs | 9 +- cmd/exrun | 6 +- config/faucet/faucet_alt-switch.yaml | 22 ++++ config/system/all.conf | 2 +- config/system/alt.yaml | 20 ++++ config/system/ext.conf | 2 
+- config/system/ext.yaml | 2 +- config/system/muddy.conf | 2 +- config/system/multi.conf | 2 +- daq/topology.py | 8 +- firebase/public/protos.hash | 2 +- firebase/public/protos.html | 9 +- libs/proto/system_config_pb2.py | 156 ++++++++++++++------------- proto/system_config.proto | 9 +- testing/test_base.out | 2 + testing/test_base.sh | 5 + testing/test_preamble.sh | 1 + 20 files changed, 206 insertions(+), 102 deletions(-) create mode 100755 bin/alt_faucet create mode 100644 config/faucet/faucet_alt-switch.yaml create mode 100644 config/system/alt.yaml diff --git a/.gitignore b/.gitignore index ada16ef8f5..5e280a7725 100644 --- a/.gitignore +++ b/.gitignore @@ -21,16 +21,14 @@ validations/ *.save # Runtime or sub-module files -inst/ -faucet/ -forch/ -mininet/ -local/ -local_xxx -local.old -firebase/.firebaserc -firebase/.firebase -firebase/functions/package-lock.json +/inst/ +/faucet/ +/forch/ +/mininet/ +/local/ +/firebase/.firebaserc +/firebase/.firebase +/firebase/functions/package-lock.json nohup.out **/node_modules/ .vscode/ diff --git a/bin/alt_faucet b/bin/alt_faucet new file mode 100755 index 0000000000..757ad64218 --- /dev/null +++ b/bin/alt_faucet @@ -0,0 +1,24 @@ +#!/bin/bash -e + +ROOT=$(realpath $(dirname $0)/..) +cd $ROOT +source etc/config_base.sh + +if [ -z "$switch_setup_ext_br" ]; then + echo switch_setup.ext_br not defined for alternate faucet setup. + false +fi + +if [ -z "$switch_setup_alt_port" ]; then + echo switch_setup.alt_port not defined for alternate faucet setup. 
+ false +fi + +inst_name=$switch_setup_ext_br + +inst_dir=inst/faucet/daq-faucet-$inst_name +mkdir -p $inst_dir +cp config/faucet/faucet_$inst_name.yaml $inst_dir/faucet.yaml +echo Launching alternate faucet install $inst_name on $switch_setup_alt_port +echo DAQ autoclean docker kill daq-faucet-$inst_name +cmd/faucet $inst_name $switch_setup_alt_port diff --git a/bin/build_proto b/bin/build_proto index b0886e5d86..cd20e074ef 100755 --- a/bin/build_proto +++ b/bin/build_proto @@ -32,6 +32,11 @@ sha1sum $proto_files > $WEB_ROOT/protos.hash gen_path=$ROOT/protoc-gen-doc/bin/protoc-gen-doc +if [ -d venv ]; then + echo Entering virtual python environment... + source venv/bin/activate +fi + mkdir -p build/daq/proto build/proto cp $proto_files build/daq/proto/ proto_files2= @@ -57,4 +62,4 @@ touch libs/proto/__init__.py cp build/daq/proto/*.py libs/proto/ cp build/protos.html $WEB_ROOT/ -python -m grpc_tools.protoc -I usi/src/main/proto/ --python_out=libs/proto/ --grpc_python_out=libs/proto/ usi/src/main/proto/usi.proto +python3 -m grpc_tools.protoc -I usi/src/main/proto/ --python_out=libs/proto/ --grpc_python_out=libs/proto/ usi/src/main/proto/usi.proto diff --git a/bin/external_ovs b/bin/external_ovs index a289bd9cff..080fdcdecb 100755 --- a/bin/external_ovs +++ b/bin/external_ovs @@ -6,14 +6,19 @@ source etc/config_base.sh ext_intf=$switch_setup_data_intf ext_dpid=$switch_setup_of_dpid -ext_ofpt=$switch_setup_lo_port ext_brid=$switch_setup_ext_br ext_brpt=$switch_setup_uplink_port ext_pri=${ext_intf} ext_sec=${ext_intf%-pri}-sec -echo ext_dpid is $ext_dpid +if [ -z "$switch_setup_alt_port" ]; then + ext_ofpt=$switch_setup_lo_port +else + ext_ofpt=$switch_setup_alt_port +fi + +echo ext_dpid is $ext_dpid on port $ext_ofpt echo network_config is $network_config dpid=$(printf %016x $ext_dpid) diff --git a/cmd/exrun b/cmd/exrun index 88a047994b..da511d373f 100755 --- a/cmd/exrun +++ b/cmd/exrun @@ -70,7 +70,7 @@ fi export PYTHONPATH=$FORCH:$FAUCET:$MININET:$LIBS:$PROTO 
mkdir -p $INSTDIR -rm -f $INSTDIR/faucet* $cleanup_file +rm -rf $INSTDIR/faucet* $cleanup_file docker ps > /dev/null 2>&1 || service docker start @@ -101,6 +101,10 @@ if [ -n "$switch_setup_ext_br" ]; then autostart bin/external_ovs fi +if [ -n "$switch_setup_alt_port" ]; then + autostart bin/alt_faucet +fi + if [ -n "$switch_setup_model" ]; then autostart bin/physical_sec else diff --git a/config/faucet/faucet_alt-switch.yaml b/config/faucet/faucet_alt-switch.yaml new file mode 100644 index 0000000000..876cf786fd --- /dev/null +++ b/config/faucet/faucet_alt-switch.yaml @@ -0,0 +1,22 @@ +dps: + alt-switch: + dp_id: 2 + interfaces: + 1: + native_vlan: 1001 + 2: + native_vlan: 1002 + 3: + native_vlan: 1003 + 4: + native_vlan: 1004 + 5: + native_vlan: 1005 + 100: + tagged_vlans: [1001, 1002, 1003, 1004, 1005] +vlans: + 1001: + 1002: + 1003: + 1004: + 1005: diff --git a/config/system/all.conf b/config/system/all.conf index 0fe77da8fb..612cdc89cb 100644 --- a/config/system/all.conf +++ b/config/system/all.conf @@ -3,7 +3,7 @@ # Load defaults. source config/system/default.yaml -# Description description for dashboard. +# Description for dashboard. site_description="Multi-Device All-Tests Configuration" # Upstream dataplane port from the external (secondary) switch. diff --git a/config/system/alt.yaml b/config/system/alt.yaml new file mode 100644 index 0000000000..e429326c7f --- /dev/null +++ b/config/system/alt.yaml @@ -0,0 +1,20 @@ +# Example configuration file for using an OVS switch not managed by DAQ. + +# Load defaults. +include: config/system/default.yaml + +# Description for dashboard. +site_description: "Alternate (not managed by DAQ) OVS switch configuration" + +# Network switch configuration. +switch_setup: + data_intf: alt-intf + alt_port: 6669 + uplink_port: 100 + ext_br: alt-switch + +# Faux device connection for testing. 
+interfaces: + faux: + opts: + port: 2 diff --git a/config/system/ext.conf b/config/system/ext.conf index 8625109734..28dc34b707 100644 --- a/config/system/ext.conf +++ b/config/system/ext.conf @@ -3,7 +3,7 @@ # Load defaults. source config/system/default.yaml -# Description description for dashboard. +# Description for dashboard. site_description="External (not integrated with DAQ) OVS switch configuration" # Network switch configuration. diff --git a/config/system/ext.yaml b/config/system/ext.yaml index 7ad626341e..4fef079c1e 100644 --- a/config/system/ext.yaml +++ b/config/system/ext.yaml @@ -3,7 +3,7 @@ # Load defaults. include: config/system/default.yaml -# Description description for dashboard. +# Description for dashboard. site_description: "External (not integrated with DAQ) OVS switch configuration" # Network switch configuration. diff --git a/config/system/muddy.conf b/config/system/muddy.conf index 6510e2b113..3d3a17b30c 100644 --- a/config/system/muddy.conf +++ b/config/system/muddy.conf @@ -3,7 +3,7 @@ # Load defaults. source config/system/default.yaml -# Description description for dashboard. +# Description for dashboard. site_description="Multi-Device Configuration" # Upstream dataplane port from the external (secondary) switch. diff --git a/config/system/multi.conf b/config/system/multi.conf index 185bbc40df..367a94e86b 100644 --- a/config/system/multi.conf +++ b/config/system/multi.conf @@ -3,7 +3,7 @@ # Load defaults. source config/system/default.yaml -# Description description for dashboard. +# Description for dashboard. site_description="Multi-Device Configuration" # Upstream dataplane port from the external (secondary) switch. 
diff --git a/daq/topology.py b/daq/topology.py index 8d0cb62390..2cda43ebe4 100644 --- a/daq/topology.py +++ b/daq/topology.py @@ -231,22 +231,24 @@ def _make_acl_include(self): def _make_pri_topology(self): pri_dp = {} pri_dp['dp_id'] = self.PRI_DPID - pri_dp['name'] = self.pri_name pri_dp['interfaces'] = self._make_pri_interfaces() return pri_dp def _make_sec_topology(self): sec_dp = {} sec_dp['dp_id'] = self.sec_dpid - sec_dp['name'] = self.sec_name sec_dp['interfaces'] = self._make_sec_interfaces() return sec_dp + def _has_sec_switch(self): + return self.sec_dpid and self.sec_port + def _make_base_network_topology(self): assert self.pri, 'pri dataplane not configured' dps = {} dps['pri'] = self._make_pri_topology() - dps['sec'] = self._make_sec_topology() + if self._has_sec_switch(): + dps['sec'] = self._make_sec_topology() topology = {} topology['dps'] = dps topology['vlans'] = self._make_vlan_description(10) diff --git a/firebase/public/protos.hash b/firebase/public/protos.hash index 3873f3c004..786633c8a9 100644 --- a/firebase/public/protos.hash +++ b/firebase/public/protos.hash @@ -1 +1 @@ -96148b4135bc7326586f96fc38a18beeca8147c4 proto/system_config.proto +b335b4bd73bb5242e822a9b72cf4de6bd010cea3 proto/system_config.proto diff --git a/firebase/public/protos.html b/firebase/public/protos.html index cfcb14e33d..13290969b0 100644 --- a/firebase/public/protos.html +++ b/firebase/public/protos.html @@ -624,7 +624,14 @@

    SwitchSetup

    lo_port int32 -

    Local port of open flow controller

    +

    Local port of DAQ OpenFlow controller

    + + + + alt_port + int32 + +

    Local port for an alternate OpenFlow controller

    diff --git a/libs/proto/system_config_pb2.py b/libs/proto/system_config_pb2.py index e110d5eade..d82945d1c4 100644 --- a/libs/proto/system_config_pb2.py +++ b/libs/proto/system_config_pb2.py @@ -1,8 +1,7 @@ +# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: daq/proto/system_config.proto -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -19,7 +18,7 @@ package='', syntax='proto3', serialized_options=None, - serialized_pb=_b('\n\x1d\x64\x61q/proto/system_config.proto\"\x99\x08\n\tDaqConfig\x12\x18\n\x10site_description\x18\x01 \x01(\t\x12\x18\n\x10monitor_scan_sec\x18\x02 \x01(\x05\x12\x1b\n\x13\x64\x65\x66\x61ult_timeout_sec\x18\x03 \x01(\x05\x12\x12\n\nsettle_sec\x18& \x01(\x05\x12\x11\n\tbase_conf\x18\x04 \x01(\t\x12\x11\n\tsite_path\x18\x05 \x01(\t\x12\x1f\n\x17initial_dhcp_lease_time\x18\x06 \x01(\t\x12\x17\n\x0f\x64hcp_lease_time\x18\x07 \x01(\t\x12\x19\n\x11\x64hcp_response_sec\x18\' \x01(\x05\x12\x1e\n\x16long_dhcp_response_sec\x18\x08 \x01(\x05\x12\"\n\x0cswitch_setup\x18\t \x01(\x0b\x32\x0c.SwitchSetup\x12\x12\n\nhost_tests\x18\x10 \x01(\t\x12\x13\n\x0b\x62uild_tests\x18$ \x01(\x08\x12\x11\n\trun_limit\x18\x11 \x01(\x05\x12\x11\n\tfail_mode\x18\x12 \x01(\x08\x12\x13\n\x0bsingle_shot\x18\" \x01(\x08\x12\x15\n\rresult_linger\x18\x13 \x01(\x08\x12\x0f\n\x07no_test\x18\x14 \x01(\x08\x12\x11\n\tkeep_hold\x18( \x01(\x08\x12\x14\n\x0c\x64\x61q_loglevel\x18\x15 \x01(\t\x12\x18\n\x10mininet_loglevel\x18\x16 \x01(\t\x12\x13\n\x0b\x66inish_hook\x18# \x01(\t\x12\x10\n\x08gcp_cred\x18\x17 \x01(\t\x12\x11\n\tgcp_topic\x18\x18 \x01(\t\x12\x13\n\x0bschema_path\x18\x19 \x01(\t\x12\x11\n\tmud_files\x18\x1a \x01(\t\x12\x14\n\x0c\x64\x65vice_specs\x18\x1b \x01(\t\x12\x13\n\x0btest_config\x18\x1c 
\x01(\t\x12\x19\n\x11port_debounce_sec\x18\x1d \x01(\x05\x12\x11\n\tfail_hook\x18\x1e \x01(\t\x12\x17\n\x0f\x64\x65vice_template\x18\x1f \x01(\t\x12\x14\n\x0csite_reports\x18 \x01(\t\x12\x1f\n\x17run_data_retention_days\x18! \x01(\x02\x12.\n\ninterfaces\x18% \x03(\x0b\x32\x1a.DaqConfig.InterfacesEntry\x12/\n\x0b\x66\x61il_module\x18/ \x03(\x0b\x32\x1a.DaqConfig.FailModuleEntry\x12\x1d\n\x15port_flap_timeout_sec\x18\x30 \x01(\x05\x12\x1c\n\tusi_setup\x18\x31 \x01(\x0b\x32\t.USISetup\x1a=\n\x0fInterfacesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.Interface:\x02\x38\x01\x1a\x31\n\x0f\x46\x61ilModuleEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x17\n\x08USISetup\x12\x0b\n\x03url\x18\x01 \x01(\t\"\xe2\x01\n\x0bSwitchSetup\x12\x11\n\tctrl_intf\x18\t \x01(\t\x12\x0f\n\x07ip_addr\x18\x0b \x01(\t\x12\x13\n\x0buplink_port\x18\r \x01(\x05\x12\x0f\n\x07lo_port\x18\x0e \x01(\x05\x12\x0f\n\x07lo_addr\x18\x0f \x01(\t\x12\x11\n\tmods_addr\x18\x10 \x01(\t\x12\x0f\n\x07of_dpid\x18) \x01(\t\x12\x11\n\tdata_intf\x18* \x01(\t\x12\x0e\n\x06\x65xt_br\x18+ \x01(\t\x12\r\n\x05model\x18, \x01(\t\x12\x10\n\x08username\x18- \x01(\t\x12\x10\n\x08password\x18. 
\x01(\t\"\'\n\tInterface\x12\x0c\n\x04opts\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\x62\x06proto3') + serialized_pb=b'\n\x1d\x64\x61q/proto/system_config.proto\"\x99\x08\n\tDaqConfig\x12\x18\n\x10site_description\x18\x01 \x01(\t\x12\x18\n\x10monitor_scan_sec\x18\x02 \x01(\x05\x12\x1b\n\x13\x64\x65\x66\x61ult_timeout_sec\x18\x03 \x01(\x05\x12\x12\n\nsettle_sec\x18& \x01(\x05\x12\x11\n\tbase_conf\x18\x04 \x01(\t\x12\x11\n\tsite_path\x18\x05 \x01(\t\x12\x1f\n\x17initial_dhcp_lease_time\x18\x06 \x01(\t\x12\x17\n\x0f\x64hcp_lease_time\x18\x07 \x01(\t\x12\x19\n\x11\x64hcp_response_sec\x18\' \x01(\x05\x12\x1e\n\x16long_dhcp_response_sec\x18\x08 \x01(\x05\x12\"\n\x0cswitch_setup\x18\t \x01(\x0b\x32\x0c.SwitchSetup\x12\x12\n\nhost_tests\x18\x10 \x01(\t\x12\x13\n\x0b\x62uild_tests\x18$ \x01(\x08\x12\x11\n\trun_limit\x18\x11 \x01(\x05\x12\x11\n\tfail_mode\x18\x12 \x01(\x08\x12\x13\n\x0bsingle_shot\x18\" \x01(\x08\x12\x15\n\rresult_linger\x18\x13 \x01(\x08\x12\x0f\n\x07no_test\x18\x14 \x01(\x08\x12\x11\n\tkeep_hold\x18( \x01(\x08\x12\x14\n\x0c\x64\x61q_loglevel\x18\x15 \x01(\t\x12\x18\n\x10mininet_loglevel\x18\x16 \x01(\t\x12\x13\n\x0b\x66inish_hook\x18# \x01(\t\x12\x10\n\x08gcp_cred\x18\x17 \x01(\t\x12\x11\n\tgcp_topic\x18\x18 \x01(\t\x12\x13\n\x0bschema_path\x18\x19 \x01(\t\x12\x11\n\tmud_files\x18\x1a \x01(\t\x12\x14\n\x0c\x64\x65vice_specs\x18\x1b \x01(\t\x12\x13\n\x0btest_config\x18\x1c \x01(\t\x12\x19\n\x11port_debounce_sec\x18\x1d \x01(\x05\x12\x11\n\tfail_hook\x18\x1e \x01(\t\x12\x17\n\x0f\x64\x65vice_template\x18\x1f \x01(\t\x12\x14\n\x0csite_reports\x18 \x01(\t\x12\x1f\n\x17run_data_retention_days\x18! 
\x01(\x02\x12.\n\ninterfaces\x18% \x03(\x0b\x32\x1a.DaqConfig.InterfacesEntry\x12/\n\x0b\x66\x61il_module\x18/ \x03(\x0b\x32\x1a.DaqConfig.FailModuleEntry\x12\x1d\n\x15port_flap_timeout_sec\x18\x30 \x01(\x05\x12\x1c\n\tusi_setup\x18\x31 \x01(\x0b\x32\t.USISetup\x1a=\n\x0fInterfacesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.Interface:\x02\x38\x01\x1a\x31\n\x0f\x46\x61ilModuleEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x17\n\x08USISetup\x12\x0b\n\x03url\x18\x01 \x01(\t\"\xf4\x01\n\x0bSwitchSetup\x12\x11\n\tctrl_intf\x18\t \x01(\t\x12\x0f\n\x07ip_addr\x18\x0b \x01(\t\x12\x13\n\x0buplink_port\x18\r \x01(\x05\x12\x0f\n\x07lo_port\x18\x0e \x01(\x05\x12\x10\n\x08\x61lt_port\x18\x10 \x01(\x05\x12\x0f\n\x07lo_addr\x18\x12 \x01(\t\x12\x11\n\tmods_addr\x18\x14 \x01(\t\x12\x0f\n\x07of_dpid\x18) \x01(\t\x12\x11\n\tdata_intf\x18* \x01(\t\x12\x0e\n\x06\x65xt_br\x18+ \x01(\t\x12\r\n\x05model\x18, \x01(\t\x12\x10\n\x08username\x18- \x01(\t\x12\x10\n\x08password\x18. 
\x01(\t\"\'\n\tInterface\x12\x0c\n\x04opts\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\x62\x06proto3' ) @@ -35,7 +34,7 @@ _descriptor.FieldDescriptor( name='key', full_name='DaqConfig.InterfacesEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -52,7 +51,7 @@ nested_types=[], enum_types=[ ], - serialized_options=_b('8\001'), + serialized_options=b'8\001', is_extendable=False, syntax='proto3', extension_ranges=[], @@ -72,14 +71,14 @@ _descriptor.FieldDescriptor( name='key', full_name='DaqConfig.FailModuleEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='DaqConfig.FailModuleEntry.value', index=1, number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -89,7 +88,7 @@ nested_types=[], enum_types=[ ], - serialized_options=_b('8\001'), + serialized_options=b'8\001', is_extendable=False, syntax='proto3', extension_ranges=[], @@ -109,7 +108,7 @@ _descriptor.FieldDescriptor( name='site_description', full_name='DaqConfig.site_description', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -137,28 +136,28 @@ _descriptor.FieldDescriptor( name='base_conf', full_name='DaqConfig.base_conf', index=4, number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='site_path', full_name='DaqConfig.site_path', index=5, number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='initial_dhcp_lease_time', full_name='DaqConfig.initial_dhcp_lease_time', index=6, number=6, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dhcp_lease_time', full_name='DaqConfig.dhcp_lease_time', index=7, number=7, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -186,7 +185,7 @@ _descriptor.FieldDescriptor( name='host_tests', full_name='DaqConfig.host_tests', index=11, number=16, type=9, cpp_type=9, label=1, - has_default_value=False, 
default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -242,63 +241,63 @@ _descriptor.FieldDescriptor( name='daq_loglevel', full_name='DaqConfig.daq_loglevel', index=19, number=21, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='mininet_loglevel', full_name='DaqConfig.mininet_loglevel', index=20, number=22, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='finish_hook', full_name='DaqConfig.finish_hook', index=21, number=35, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='gcp_cred', full_name='DaqConfig.gcp_cred', index=22, number=23, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='gcp_topic', full_name='DaqConfig.gcp_topic', index=23, number=24, type=9, 
cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='schema_path', full_name='DaqConfig.schema_path', index=24, number=25, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='mud_files', full_name='DaqConfig.mud_files', index=25, number=26, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='device_specs', full_name='DaqConfig.device_specs', index=26, number=27, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='test_config', full_name='DaqConfig.test_config', index=27, number=28, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -312,21 +311,21 @@ _descriptor.FieldDescriptor( name='fail_hook', 
full_name='DaqConfig.fail_hook', index=29, number=30, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='device_template', full_name='DaqConfig.device_template', index=30, number=31, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='site_reports', full_name='DaqConfig.site_reports', index=31, number=32, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -392,7 +391,7 @@ _descriptor.FieldDescriptor( name='url', full_name='USISetup.url', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -423,14 +422,14 @@ _descriptor.FieldDescriptor( name='ctrl_intf', full_name='SwitchSetup.ctrl_intf', index=0, number=9, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), 
_descriptor.FieldDescriptor( name='ip_addr', full_name='SwitchSetup.ip_addr', index=1, number=11, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -449,58 +448,65 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='lo_addr', full_name='SwitchSetup.lo_addr', index=4, - number=15, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + name='alt_port', full_name='SwitchSetup.alt_port', index=4, + number=16, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='mods_addr', full_name='SwitchSetup.mods_addr', index=5, - number=16, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + name='lo_addr', full_name='SwitchSetup.lo_addr', index=5, + number=18, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='mods_addr', full_name='SwitchSetup.mods_addr', index=6, + number=20, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='of_dpid', full_name='SwitchSetup.of_dpid', index=6, + name='of_dpid', full_name='SwitchSetup.of_dpid', index=7, 
number=41, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='data_intf', full_name='SwitchSetup.data_intf', index=7, + name='data_intf', full_name='SwitchSetup.data_intf', index=8, number=42, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='ext_br', full_name='SwitchSetup.ext_br', index=8, + name='ext_br', full_name='SwitchSetup.ext_br', index=9, number=43, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='model', full_name='SwitchSetup.model', index=9, + name='model', full_name='SwitchSetup.model', index=10, number=44, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='username', full_name='SwitchSetup.username', index=10, + name='username', full_name='SwitchSetup.username', index=11, number=45, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, 
default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='password', full_name='SwitchSetup.password', index=11, + name='password', full_name='SwitchSetup.password', index=12, number=46, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -517,7 +523,7 @@ oneofs=[ ], serialized_start=1111, - serialized_end=1337, + serialized_end=1355, ) @@ -531,7 +537,7 @@ _descriptor.FieldDescriptor( name='opts', full_name='Interface.opts', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), @@ -554,8 +560,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1339, - serialized_end=1378, + serialized_start=1357, + serialized_end=1396, ) _DAQCONFIG_INTERFACESENTRY.fields_by_name['value'].message_type = _INTERFACE @@ -571,48 +577,48 @@ DESCRIPTOR.message_types_by_name['Interface'] = _INTERFACE _sym_db.RegisterFileDescriptor(DESCRIPTOR) -DaqConfig = _reflection.GeneratedProtocolMessageType('DaqConfig', (_message.Message,), dict( +DaqConfig = _reflection.GeneratedProtocolMessageType('DaqConfig', (_message.Message,), { - InterfacesEntry = _reflection.GeneratedProtocolMessageType('InterfacesEntry', (_message.Message,), dict( - DESCRIPTOR = _DAQCONFIG_INTERFACESENTRY, - __module__ = 'daq.proto.system_config_pb2' + 'InterfacesEntry' : _reflection.GeneratedProtocolMessageType('InterfacesEntry', (_message.Message,), { + 
'DESCRIPTOR' : _DAQCONFIG_INTERFACESENTRY, + '__module__' : 'daq.proto.system_config_pb2' # @@protoc_insertion_point(class_scope:DaqConfig.InterfacesEntry) - )) + }) , - FailModuleEntry = _reflection.GeneratedProtocolMessageType('FailModuleEntry', (_message.Message,), dict( - DESCRIPTOR = _DAQCONFIG_FAILMODULEENTRY, - __module__ = 'daq.proto.system_config_pb2' + 'FailModuleEntry' : _reflection.GeneratedProtocolMessageType('FailModuleEntry', (_message.Message,), { + 'DESCRIPTOR' : _DAQCONFIG_FAILMODULEENTRY, + '__module__' : 'daq.proto.system_config_pb2' # @@protoc_insertion_point(class_scope:DaqConfig.FailModuleEntry) - )) + }) , - DESCRIPTOR = _DAQCONFIG, - __module__ = 'daq.proto.system_config_pb2' + 'DESCRIPTOR' : _DAQCONFIG, + '__module__' : 'daq.proto.system_config_pb2' # @@protoc_insertion_point(class_scope:DaqConfig) - )) + }) _sym_db.RegisterMessage(DaqConfig) _sym_db.RegisterMessage(DaqConfig.InterfacesEntry) _sym_db.RegisterMessage(DaqConfig.FailModuleEntry) -USISetup = _reflection.GeneratedProtocolMessageType('USISetup', (_message.Message,), dict( - DESCRIPTOR = _USISETUP, - __module__ = 'daq.proto.system_config_pb2' +USISetup = _reflection.GeneratedProtocolMessageType('USISetup', (_message.Message,), { + 'DESCRIPTOR' : _USISETUP, + '__module__' : 'daq.proto.system_config_pb2' # @@protoc_insertion_point(class_scope:USISetup) - )) + }) _sym_db.RegisterMessage(USISetup) -SwitchSetup = _reflection.GeneratedProtocolMessageType('SwitchSetup', (_message.Message,), dict( - DESCRIPTOR = _SWITCHSETUP, - __module__ = 'daq.proto.system_config_pb2' +SwitchSetup = _reflection.GeneratedProtocolMessageType('SwitchSetup', (_message.Message,), { + 'DESCRIPTOR' : _SWITCHSETUP, + '__module__' : 'daq.proto.system_config_pb2' # @@protoc_insertion_point(class_scope:SwitchSetup) - )) + }) _sym_db.RegisterMessage(SwitchSetup) -Interface = _reflection.GeneratedProtocolMessageType('Interface', (_message.Message,), dict( - DESCRIPTOR = _INTERFACE, - __module__ = 
'daq.proto.system_config_pb2' +Interface = _reflection.GeneratedProtocolMessageType('Interface', (_message.Message,), { + 'DESCRIPTOR' : _INTERFACE, + '__module__' : 'daq.proto.system_config_pb2' # @@protoc_insertion_point(class_scope:Interface) - )) + }) _sym_db.RegisterMessage(Interface) diff --git a/proto/system_config.proto b/proto/system_config.proto index 1566620515..1d1d3c6475 100644 --- a/proto/system_config.proto +++ b/proto/system_config.proto @@ -140,14 +140,17 @@ message SwitchSetup { // Dataplane uplink port int32 uplink_port = 13; - // Local port of open flow controller + // Local port of DAQ OpenFlow controller int32 lo_port = 14; + // Local port for an alternate OpenFlow controller + int32 alt_port = 16; + // IP address and subnet for local control plane interface - string lo_addr = 15; + string lo_addr = 18; // IP address template and subnet for module ip addresses - string mods_addr = 16; + string mods_addr = 20; // Dataplane id of external OpenFlow switch string of_dpid = 41; diff --git a/testing/test_base.out b/testing/test_base.out index 86580802e2..edc14eaa30 100644 --- a/testing/test_base.out +++ b/testing/test_base.out @@ -152,6 +152,8 @@ RESULT pass base.switch.ping target %% 192.0.2.138:2 Switch test with target 192.0.2.138:2 Monolog processing base.switch.ping... 
switch ping 2 +%%%%%%%%%%%%%%%%%%%%%% Alt switch tests +XXX faucet.valve INFO DPID 1 (0x1) pri L2 learned on Port 1 9a:02:57:1e:8f:00 (L2 type 0x0800, L2 dst ff:ff:ff:ff:ff:ff, L3 src X.X.X.X, L3 dst 255.255.255.255) Port 1 VLAN 1002 (1 hosts total) %%%%%%%%%%%%%%%%%%%%%% Mud profile tests result open 01: [] 02: [] 03: [] device open 1 1 1 diff --git a/testing/test_base.sh b/testing/test_base.sh index d39e1c692c..3a4363e93a 100755 --- a/testing/test_base.sh +++ b/testing/test_base.sh @@ -52,6 +52,11 @@ cat -vet inst/run-port-02/nodes/ping02/activate.log count=$(fgrep icmp_seq=5 inst/run-port-02/nodes/ping02/activate.log | wc -l) echo switch ping $count | tee -a $TEST_RESULTS +echo %%%%%%%%%%%%%%%%%%%%%% Alt switch tests | tee -a $TEST_RESULTS +cp config/system/alt.yaml local/system.yaml +# TODO: Replace this with proper test once VLAN-triggers are added. +timeout 120s cmd/run -s +fgrep 'Port 1 9a:02:57:1e:8f:00' inst/faucet.log | redact | tee -a $TEST_RESULTS echo %%%%%%%%%%%%%%%%%%%%%% Mud profile tests | tee -a $TEST_RESULTS rm -f local/system.yaml cp config/system/muddy.conf local/system.conf diff --git a/testing/test_preamble.sh b/testing/test_preamble.sh index 93ca247d89..d01fc546f4 100644 --- a/testing/test_preamble.sh +++ b/testing/test_preamble.sh @@ -56,6 +56,7 @@ function redact { -e 's/[0-9]{4}-.*T.*Z/XXX/' \ -e 's/[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2} [A-Z]{3}/XXX/' \ -e 's/[a-zA-Z]{3} [a-zA-Z]{3}\s+[0-9]{1,2} [0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2} [0-9]{4}/XXX/' \ + -e 's/[A-Za-z]{3} [0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/XXX/' \ -e 's/[0-9]{4}-(0|1)[0-9]-(0|1|2|3)[0-9] [0-9]{2}:[0-9]{2}:[0-9]{2}(\+00:00)?/XXX/g' \ -e 's/[0-9]+\.[0-9]{2} seconds/XXX/' \ -e 's/0\.[0-9]+s latency/XXX/' \ From fe0bf8b3c9dee0152d76dc9f4d64439d1f043a18 Mon Sep 17 00:00:00 2001 From: henry54809 Date: Tue, 14 Jul 2020 14:34:37 -0700 Subject: [PATCH 24/38] Additional DHCP test part 1 (#532) Port toggle DHCP test --- daq/host.py | 233 ++++++++++++------- daq/report.py | 5 +- 
docs/device_report.md | 1 + resources/setups/baseline/module_config.json | 19 +- testing/test_aux.out | 30 +++ testing/test_base.out | 1 + testing/test_many.out | 2 + testing/test_many.sh | 49 +++- 8 files changed, 249 insertions(+), 91 deletions(-) diff --git a/daq/host.py b/daq/host.py index 8dc3d5f8ac..2113693132 100644 --- a/daq/host.py +++ b/daq/host.py @@ -5,6 +5,7 @@ import shutil import time from datetime import timedelta, datetime +import logging import grpc from clib import tcpdump_helper @@ -17,9 +18,6 @@ import gcp import logger -LOGGER = logger.get_logger('host') - - class _STATE: """Host state enum for testing cycle""" ERROR = 'Error condition' @@ -55,11 +53,22 @@ def pre_states(): return ['startup', 'sanity', 'ipaddr', 'base', 'monitor'] +def dhcp_tests(): + """Returns all supported dhcp tests""" + return ['port_toggle', 'multi_subnet', 'ip_change'] + + def post_states(): """Return post-test states for recording finalization""" return ['finish', 'info', 'timer'] +def get_test_config(config, test): + """Get a single test module's config""" + if test in dhcp_tests(): + return config['modules'].get('ipaddr', {}).get('dhcp_tests', {}).get(test) + return config["modules"].get(test) + class ConnectedHost: """Class managing a device-under-test""" @@ -73,6 +82,7 @@ class ConnectedHost: _CONFIG_DIR = "config/" _TIMEOUT_EXCEPTION = TimeoutError('Timeout expired') + # pylint: disable=too-many-statements def __init__(self, runner, gateway, target, config): self.configurator = configurator.Configurator() self.runner = runner @@ -86,6 +96,7 @@ def __init__(self, runner, gateway, target, config): self.devdir = self._init_devdir() self.run_id = self.make_runid() self.scan_base = os.path.abspath(os.path.join(self.devdir, 'scans')) + self.logger = logger.get_logger('host%s' % self.target_port) self._port_base = self._get_port_base() self._device_base = self._get_device_base() self.state = None @@ -112,7 +123,9 @@ def __init__(self, runner, gateway, target, config): 
assert self._loaded_config, 'config was not loaded' self._write_module_config(self._loaded_config, self._device_aux_path()) self.remaining_tests = self._get_enabled_tests() - LOGGER.info('Host %s running with enabled tests %s', self.target_port, self.remaining_tests) + self.dhcp_tests = self._get_dhcp_tests() + self.logger.info('Host %s running with enabled tests %s', self.target_port, + self.remaining_tests) self._report = ReportGenerator(config, self._INST_DIR, self.target_mac, self._loaded_config) self.record_result('startup', state=MODE.PREP) @@ -121,6 +134,12 @@ def __init__(self, runner, gateway, target, config): self._startup_file = None self.timeout_handler = self._aux_module_timeout_handler self._all_ips = [] + self._ip_listener = None + self._dhcp_tests_map = { + 'port_toggle': self._dhcp_port_toggle_test, + 'multi_subnet': None, # TODO + 'ip_change': None # TODO + } @staticmethod def make_runid(): @@ -139,7 +158,7 @@ def _get_port_base(self): return None conf_base = os.path.abspath(os.path.join(test_config, 'port-%02d' % self.target_port)) if not os.path.isdir(conf_base): - LOGGER.warning('Test config directory not found: %s', conf_base) + self.logger.warning('Test config directory not found: %s', conf_base) return None return conf_base @@ -154,22 +173,25 @@ def _make_control_bundle(self): 'paused': self.state == _STATE.READY } + def _get_test_config(self, test): + return get_test_config(self._loaded_config, test) + def _test_enabled(self, test): fallback_config = {'enabled': test in self._CORE_TESTS} - test_config = self._loaded_config['modules'].get(test, fallback_config) + test_config = self._get_test_config(test) or fallback_config return test_config.get('enabled', True) def _get_test_timeout(self, test): - test_module = self._loaded_config['modules'].get(test) if test == 'hold': return None + test_module = self._get_test_config(test) if not test_module: return self._default_timeout_sec return test_module.get('timeout_sec', self._default_timeout_sec) 
def get_port_flap_timeout(self, test): """Get port toggle timeout configuration that's specific to each test module""" - test_module = self._loaded_config['modules'].get(test) + test_module = self._get_test_config(test) if not test_module: return None return test_module.get('port_flap_timeout_sec') @@ -194,6 +216,10 @@ def _get_static_ip(self): def _get_dhcp_mode(self): return self._loaded_config['modules'].get('ipaddr', {}).get('dhcp_mode', 'normal') + def _get_dhcp_tests(self): + tests = self._loaded_config['modules'].get('ipaddr', {}).get('dhcp_tests', {}).keys() + return list(filter(self._test_enabled, tests)) + def _get_unique_upload_path(self, file_name): base = os.path.basename(file_name) partial = os.path.join('tests', self.test_name, base) if self.test_name else base @@ -210,7 +236,7 @@ def _type_path(self): device_type = dev_config.get('device_type') if not device_type: return None - LOGGER.info('Configuring device %s as type %s', self.target_mac, device_type) + self.logger.info('Configuring device %s as type %s', self.target_mac, device_type) site_path = self.config.get('site_path') type_path = os.path.abspath(os.path.join(site_path, 'device_types', device_type)) return type_path @@ -221,20 +247,20 @@ def _type_aux_path(self): return None aux_path = os.path.join(type_path, self._AUX_DIR) if not os.path.exists(aux_path): - LOGGER.info('Skipping missing type dir %s', aux_path) + self.logger.info('Skipping missing type dir %s', aux_path) return None return aux_path def _create_device_dir(self, path): - LOGGER.warning('Creating new device dir: %s', path) + self.logger.warning('Creating new device dir: %s', path) os.makedirs(path) template_dir = self.config.get('device_template') if not template_dir: - LOGGER.warning('Skipping defaults since no device_template found') + self.logger.warning('Skipping defaults since no device_template found') return - LOGGER.info('Copying template files from %s to %s', template_dir, path) + self.logger.info('Copying template 
files from %s to %s', template_dir, path) for file in os.listdir(template_dir): - LOGGER.info('Copying %s...', file) + self.logger.info('Copying %s...', file) shutil.copy(os.path.join(template_dir, file), path) def _upload_file(self, path): @@ -243,7 +269,7 @@ def _upload_file(self, path): def initialize(self): """Fully initialize a new host set""" - LOGGER.info('Target port %d initializing...', self.target_port) + self.logger.info('Target port %d initializing...', self.target_port) # There is a race condition here with ovs assigning ports, so wait a bit. time.sleep(2) shutil.rmtree(self.devdir, ignore_errors=True) @@ -265,7 +291,7 @@ def _start_run(self): self._startup_scan() def _mark_skipped_tests(self): - for test in self.config['test_list']: + for test in self.config['test_list'] + dhcp_tests(): if not self._test_enabled(test): self._record_result(test, state=MODE.NOPE) @@ -274,7 +300,7 @@ def _state_transition(self, target, expected=None): message = 'state was %s expected %s' % (self.state, expected) assert self.state == expected, message assert self.state != _STATE.TERM, 'host already terminated' - LOGGER.debug('Target port %d state: %s -> %s', self.target_port, self.state, target) + self.logger.debug('Target port %d state: %s -> %s', self.target_port, self.state, target) self.state = target def _build_switch_info(self) -> usi.SwitchInfo: @@ -316,19 +342,20 @@ def connect_port(self, connect): res = stub.connect(switch_info) else: res = stub.disconnect(switch_info) - LOGGER.info('Target port %s %s successful? %s', self.target_port, "connect" - if connect else "disconnect", res.success) + self.logger.info('Target port %s %s successful? 
%s', self.target_port, "connect" + if connect else "disconnect", res.success) except Exception as e: - LOGGER.error(e) + self.logger.error(e) + raise e def _prepare(self): - LOGGER.info('Target port %d waiting for ip as %s', self.target_port, self.target_mac) + self.logger.info('Target port %d waiting for ip as %s', self.target_port, self.target_mac) self._state_transition(_STATE.WAITING, _STATE.INIT) self.record_result('sanity', state=MODE.DONE) self.record_result('ipaddr', state=MODE.EXEC) static_ip = self._get_static_ip() if static_ip: - LOGGER.info('Target port %d using static ip', self.target_port) + self.logger.info('Target port %d using static ip', self.target_port) time.sleep(self._STARTUP_MIN_TIME_SEC) self.runner.ip_notify(MODE.NOPE, { 'mac': self.target_mac, @@ -340,8 +367,8 @@ def _prepare(self): # enables dhcp response for this device wait_time = self.runner.config.get("long_dhcp_response_sec") \ if dhcp_mode == 'long_response' else 0 - LOGGER.info('Target port %d using %s DHCP mode, wait %s', - self.target_port, dhcp_mode, wait_time) + self.logger.info('Target port %d using %s DHCP mode, wait %s', + self.target_port, dhcp_mode, wait_time) self.gateway.change_dhcp_response_time(self.target_mac, wait_time) _ = [listener(self) for listener in self._dhcp_listeners] @@ -362,7 +389,8 @@ def heartbeat(self): nowtime = gcp.parse_timestamp(gcp.get_timestamp()) if nowtime >= timeout: if self.timeout_handler: - LOGGER.error('Monitoring timeout for %s after %ds', self.test_name, timeout_sec) + self.logger.error('Monitoring timeout for %s after %ds', self.test_name, + timeout_sec) # ensure it's called once handler, self.timeout_handler = self.timeout_handler, None handler() @@ -376,15 +404,15 @@ def _finalize_report(self): report_paths = self._report.finalize() if self._trigger_path: report_paths.update({'trigger_path': self._trigger_path}) - LOGGER.info('Finalized with reports %s', list(report_paths.keys())) + self.logger.info('Finalized with reports %s', 
list(report_paths.keys())) report_blobs = {name: self._upload_file(path) for name, path in report_paths.items()} self.record_result('terminate', state=MODE.TERM, **report_blobs) self._report = None def terminate(self, reason, trigger=True): """Terminate this host""" - LOGGER.info('Target port %d terminate, running %s, trigger %s: %s', self.target_port, - self._host_name(), trigger, reason) + self.logger.info('Target port %d terminate, running %s, trigger %s: %s', self.target_port, + self._host_name(), trigger, reason) self._state_transition(_STATE.TERM) self._release_config() self._monitor_cleanup() @@ -396,8 +424,8 @@ def terminate(self, reason, trigger=True): self.test_host = None self.timeout_handler = None except Exception as e: - LOGGER.error('Target port %d terminating test: %s', self.target_port, e) - LOGGER.exception(e) + self.logger.error('Target port %d terminating test: %s', self.target_port, e) + self.logger.exception(e) if trigger: self.runner.target_set_complete(self.target_port, 'Target port %d termination: %s' % ( @@ -418,6 +446,8 @@ def ip_notify(self, target_ip, state=MODE.DONE, delta_sec=-1): self._all_ips.append({"ip": target_ip, "timestamp": time.time()}) if self._get_dhcp_mode() == "ip_change" and len(self._all_ips) == 1: self.gateway.request_new_ip(self.target_mac) + if self._ip_listener: + self._ip_listener(target_ip) def trigger_ready(self): """Check if this host is ready to be triggered""" @@ -433,10 +463,10 @@ def trigger_ready(self): def trigger(self, state=MODE.DONE, target_ip=None, exception=None, delta_sec=-1): """Handle device trigger""" if not self.target_ip and not self.trigger_ready(): - LOGGER.warn('Target port %d ignoring premature trigger', self.target_port) + self.logger.warn('Target port %d ignoring premature trigger', self.target_port) return False if self.target_ip: - LOGGER.debug('Target port %d already triggered', self.target_port) + self.logger.debug('Target port %d already triggered', self.target_port) assert 
self.target_ip == target_ip, "target_ip mismatch" return True self.target_ip = target_ip @@ -446,28 +476,28 @@ def trigger(self, state=MODE.DONE, target_ip=None, exception=None, delta_sec=-1) self._state_transition(_STATE.ERROR) self.runner.target_set_error(self.target_port, exception) else: - LOGGER.info('Target port %d triggered as %s', self.target_port, target_ip) + self.logger.info('Target port %d triggered as %s', self.target_port, target_ip) self._state_transition(_STATE.BASE, _STATE.WAITING) return True def _ping_test(self, src, dst, src_addr=None): if not src or not dst: - LOGGER.error('Invalid ping test params, src=%s, dst=%s', src, dst) + self.logger.error('Invalid ping test params, src=%s, dst=%s', src, dst) return False return self.runner.ping_test(src, dst, src_addr=src_addr) def _startup_scan(self): self._startup_file = os.path.join(self.scan_base, 'startup.pcap') self._startup_time = datetime.now() - LOGGER.info('Target port %d startup pcap capture', self.target_port) + self.logger.info('Target port %d startup pcap capture', self.target_port) self._monitor_scan(self._startup_file) def _monitor_scan(self, output_file, timeout=None): assert not self._monitor_ref, 'tcp_monitor already active' network = self.runner.network tcp_filter = '' - LOGGER.info('Target port %d pcap intf %s for %ss output in %s', - self.target_port, self._mirror_intf_name, timeout, output_file) + self.logger.info('Target port %d pcap intf %s for %ss output in %s', + self.target_port, self._mirror_intf_name, timeout, output_file) helper = tcpdump_helper.TcpdumpHelper(network.pri, tcp_filter, packets=None, intf_name=self._mirror_intf_name, timeout=timeout, pcap_out=output_file, @@ -483,10 +513,10 @@ def _base_start(self): success = self._base_tests() self._monitor_cleanup() if not success: - LOGGER.warning('Target port %d base tests failed', self.target_port) + self.logger.warning('Target port %d base tests failed', self.target_port) self._state_transition(_STATE.ERROR) return - 
LOGGER.info('Target port %d done with base.', self.target_port) + self.logger.info('Target port %d done with base.', self.target_port) self._background_scan() except Exception as e: self._monitor_cleanup() @@ -494,7 +524,7 @@ def _base_start(self): def _monitor_cleanup(self, forget=True): if self._monitor_ref: - LOGGER.info('Target port %d network pcap complete', self.target_port) + self.logger.info('Target port %d network pcap complete', self.target_port) active = self._monitor_ref.stream() and not self._monitor_ref.stream().closed assert active == forget, 'forget and active mismatch' self._upload_file(self._startup_file) @@ -504,7 +534,8 @@ def _monitor_cleanup(self, forget=True): self._monitor_ref = None def _monitor_error(self, exception, forget=False): - LOGGER.error('Target port %d monitor error: %s', self.target_port, exception) + self.logger.error('Target port %d monitor error: %s', self.target_port, exception) + self._ip_listener = None self._monitor_cleanup(forget=forget) self.record_result(self.test_name, exception=exception) self._state_transition(_STATE.ERROR) @@ -513,13 +544,13 @@ def _monitor_error(self, exception, forget=False): def _background_scan(self): self._state_transition(_STATE.MONITOR, _STATE.BASE) if not self._monitor_scan_sec: - LOGGER.info('Target port %d skipping background pcap', self.target_port) + self.logger.info('Target port %d skipping background pcap', self.target_port) self._monitor_continue() return self.record_result('monitor', time=self._monitor_scan_sec, state=MODE.EXEC) monitor_file = os.path.join(self.scan_base, 'monitor.pcap') - LOGGER.info('Target port %d background pcap for %ds', - self.target_port, self._monitor_scan_sec) + self.logger.info('Target port %d background pcap for %ds', + self.target_port, self._monitor_scan_sec) self._monitor_scan(monitor_file, timeout=self._monitor_scan_sec) def _monitor_timeout(self, timeout): @@ -530,7 +561,7 @@ def _monitor_timeout(self, timeout): self._monitor_complete() def 
_monitor_complete(self): - LOGGER.info('Target port %d pcap complete', self.target_port) + self.logger.info('Target port %d pcap complete', self.target_port) self._monitor_cleanup(forget=False) self.record_result('monitor', state=MODE.DONE) self._monitor_continue() @@ -542,7 +573,7 @@ def _monitor_continue(self): def _base_tests(self): self.record_result('base', state=MODE.EXEC) if not self._ping_test(self.gateway.host, self.target_ip): - LOGGER.debug('Target port %d warmup ping failed', self.target_port) + self.logger.debug('Target port %d warmup ping failed', self.target_port) try: success1 = self._ping_test(self.gateway.host, self.target_ip), 'simple ping failed' success2 = self._ping_test(self.gateway.host, self.target_ip, @@ -556,21 +587,36 @@ def _base_tests(self): self.record_result('base', state=MODE.DONE) return True + def _dhcp_port_toggle_test(self, logging_handler): + def ip_listener(target_ip): + self.logger.info("%s test Received ip: %s" % (self.test_name, target_ip)) + if logging_handler: + self.logger.removeHandler(logging_handler) + self._ip_listener = None + self._end_test() + + self.connect_port(False) + time.sleep(self.runner.config.get("port_debounce_sec", 0) + 1) + self.connect_port(True) + self._ip_listener = ip_listener + def _run_next_test(self): try: if self.remaining_tests: - LOGGER.debug('Target port %d executing tests %s', - self.target_port, self.remaining_tests) + self.logger.debug('Target port %d executing tests %s', + self.target_port, self.remaining_tests) self.timeout_handler = self._main_module_timeout_handler self._docker_test(self.remaining_tests.pop(0)) + elif self.dhcp_tests: + self._dhcp_test(self.dhcp_tests.pop(0)) else: - LOGGER.info('Target port %d no more tests remaining', self.target_port) + self.logger.info('Target port %d no more tests remaining', self.target_port) self.timeout_handler = self._aux_module_timeout_handler self._state_transition(_STATE.DONE, _STATE.NEXT) self.test_name = None self.record_result('finish', 
state=MODE.FINE) except Exception as e: - LOGGER.error('Target port %d start error: %s', self.target_port, e) + self.logger.error('Target port %d start error: %s', self.target_port, e) self._state_transition(_STATE.ERROR) self.runner.target_set_error(self.target_port, e) @@ -584,11 +630,9 @@ def _device_aux_path(self): return path def _docker_test(self, test_name): - self.test_name = test_name - self.test_start = gcp.get_timestamp() self.test_host = docker_test.DockerTest(self.runner, self.target_port, self.devdir, test_name) - LOGGER.debug('test_host start %s/%s', test_name, self._host_name()) + self.logger.debug('test_host start %s/%s', test_name, self._host_name()) try: self.test_port = self.runner.allocate_test_port(self.target_port) @@ -597,7 +641,9 @@ def _docker_test(self, test_name): raise e try: - self._start_test_host() + self._start_test(test_name) + params = self._get_module_params() + self.test_host.start(self.test_port, params, self._docker_callback, self._finish_hook) except Exception as e: self.test_host = None self.runner.release_test_port(self.target_port, self.test_port) @@ -605,14 +651,49 @@ def _docker_test(self, test_name): self._monitor_cleanup() raise e - def _start_test_host(self): - params = self._get_module_params() + def _dhcp_test(self, test_name): + self.logger.info('Target port %d dhcp test %s running', self.target_port, test_name) + self.timeout_handler = self._aux_module_timeout_handler + self._start_test(test_name) + test_fn = self._dhcp_tests_map[test_name] + logging_handler = logging.FileHandler( + os.path.join(self._host_dir_path(), 'activate.log')) + # All the logging from this host will also go to activation log to be stored + self.logger.addHandler(logging_handler) + try: + test_fn(logging_handler) + except Exception as e: + self._end_test(state=MODE.MERR, exception=e) + self.logger.removeHandler(logging_handler) + self._run_next_test() + + def _start_test(self, test_name): + self.test_name = test_name + self.test_start = 
gcp.get_timestamp() self._write_module_config(self._loaded_config, self._host_tmp_path()) self._record_result(self.test_name, config=self._loaded_config, state=MODE.CONF) self.record_result(self.test_name, state=MODE.EXEC) self._monitor_scan(os.path.join(self.scan_base, 'test_%s.pcap' % self.test_name)) self._state_transition(_STATE.TESTING, _STATE.NEXT) - self.test_host.start(self.test_port, params, self._docker_callback, self._finish_hook) + + def _end_test(self, state=MODE.DONE, return_code=None, exception=None): + self._monitor_cleanup() + self._state_transition(_STATE.NEXT, _STATE.TESTING) + report_path = os.path.join(self._host_tmp_path(), 'report.txt') + activation_log_path = os.path.join(self._host_dir_path(), 'activate.log') + module_config_path = os.path.join(self._host_tmp_path(), self._MODULE_CONFIG) + remote_paths = {} + for result_type, path in ((ResultType.REPORT_PATH, report_path), + (ResultType.ACTIVATION_LOG_PATH, activation_log_path), + (ResultType.MODULE_CONFIG_PATH, module_config_path)): + if os.path.isfile(path): + self._report.accumulate(self.test_name, {result_type: path}) + remote_paths[result_type.value] = self._upload_file(path) + self.record_result(self.test_name, state=state, code=return_code, exception=exception, + **remote_paths) + self.test_host = None + self.timeout_handler = None + self._run_next_test() def _get_module_params(self): switch_setup = self.switch_setup if 'mods_addr' in self.switch_setup else None @@ -643,7 +724,7 @@ def _get_switch_config(self): } def _host_name(self): - return self.test_host.host_name if self.test_host else 'unknown' + return self.test_host.host_name if self.test_host else (self.test_name or 'unknown') def _host_dir_path(self): return os.path.join(self.devdir, 'nodes', self._host_name()) @@ -656,35 +737,19 @@ def _finish_hook(self): finish_dir = os.path.join(self.devdir, 'finish', self._host_name()) shutil.rmtree(finish_dir, ignore_errors=True) os.makedirs(finish_dir) - LOGGER.info('Executing 
finish_hook: %s %s', self._finish_hook_script, finish_dir) + self.logger.info('Executing finish_hook: %s %s', self._finish_hook_script, finish_dir) os.system('%s %s 2>&1 > %s/finish.out' % (self._finish_hook_script, finish_dir, finish_dir)) def _docker_callback(self, return_code=None, exception=None): host_name = self._host_name() - LOGGER.info('Host callback %s/%s was %s with %s', - self.test_name, host_name, return_code, exception) - self._monitor_cleanup() + self.logger.info('Host callback %s/%s was %s with %s', + self.test_name, host_name, return_code, exception) failed = return_code or exception state = MODE.MERR if failed else MODE.DONE - report_path = os.path.join(self._host_tmp_path(), 'report.txt') - activation_log_path = os.path.join(self._host_dir_path(), 'activate.log') - module_config_path = os.path.join(self._host_tmp_path(), self._MODULE_CONFIG) - remote_paths = {} - for result_type, path in ((ResultType.REPORT_PATH, report_path), - (ResultType.ACTIVATION_LOG_PATH, activation_log_path), - (ResultType.MODULE_CONFIG_PATH, module_config_path)): - if os.path.isfile(path): - self._report.accumulate(self.test_name, {result_type: path}) - remote_paths[result_type.value] = self._upload_file(path) - self.record_result(self.test_name, state=state, code=return_code, exception=exception, - **remote_paths) self.runner.release_test_port(self.target_port, self.test_port) - self._state_transition(_STATE.NEXT, _STATE.TESTING) assert self.test_host, '_docker_callback with no test_host defined' - self.test_host = None - self.timeout_handler = None - self._run_next_test() + self._end_test(state=state, return_code=return_code, exception=exception) def _merge_run_info(self, config): config['run_info'] = { @@ -708,8 +773,8 @@ def record_result(self, name, **kwargs): """Record a named result for this test""" current = gcp.get_timestamp() if name != self.test_name: - LOGGER.debug('Target port %d report %s start %s', - self.target_port, name, current) + 
self.logger.debug('Target port %d report %s start %s', + self.target_port, name, current) self.test_name = name self.test_start = current if name: @@ -745,12 +810,12 @@ def _exception_message(self, exception): return str(exception) def _control_updated(self, control_config): - LOGGER.info('Updated control config: %s %s', self.target_mac, control_config) + self.logger.info('Updated control config: %s %s', self.target_mac, control_config) paused = control_config.get('paused') if not paused and self.is_ready(): self._start_run() elif paused and not self.is_ready(): - LOGGER.warning('Inconsistent control state for update of %s', self.target_mac) + self.logger.warning('Inconsistent control state for update of %s', self.target_mac) def reload_config(self): """Trigger a config reload due to an external config change.""" @@ -759,12 +824,12 @@ def reload_config(self): if device_ready: self._loaded_config = new_config config_bundle = self._make_config_bundle(new_config) - LOGGER.info('Device config reloaded: %s %s', device_ready, self.target_mac) + self.logger.info('Device config reloaded: %s %s', device_ready, self.target_mac) self._record_result(None, run_info=device_ready, config=config_bundle) return new_config def _dev_config_updated(self, dev_config): - LOGGER.info('Device config update: %s %s', self.target_mac, dev_config) + self.logger.info('Device config update: %s %s', self.target_mac, dev_config) self._write_module_config(dev_config, self._device_base) self.reload_config() diff --git a/daq/report.py b/daq/report.py index 311c8fe89e..d3ee8e5046 100644 --- a/daq/report.py +++ b/daq/report.py @@ -16,7 +16,6 @@ import gcp import logger - LOGGER = logger.get_logger('report') class ResultType(Enum): @@ -306,6 +305,7 @@ def _get_test_info(self, test_name): return self._module_config.get('tests', {}).get(test_name, {}) def _write_repitems(self): + from host import get_test_config # Deferring import for (test_name, result_dict) in self._repitems.items(): # To not write a 
module header if there is nothing to report def writeln(line, test_name=test_name): @@ -318,7 +318,8 @@ def writeln(line, test_name=test_name): writeln(self._TEST_SUBHEADER % "Report") self._append_file(result_dict[ResultType.REPORT_PATH]) if ResultType.MODULE_CONFIG in result_dict: - config = result_dict[ResultType.MODULE_CONFIG].get("modules", {}).get(test_name) + module_configs = result_dict[ResultType.MODULE_CONFIG] + config = get_test_config(module_configs, test_name) if config and len(config) > 0: writeln(self._TEST_SUBHEADER % "Module Config") table = MdTable(["Attribute", "Value"]) diff --git a/docs/device_report.md b/docs/device_report.md index b22465300b..a2c17dcff1 100644 --- a/docs/device_report.md +++ b/docs/device_report.md @@ -101,6 +101,7 @@ Overall device result FAIL |Attribute|Value| |---|---| |timeout_sec|300| +|dhcp_tests|{'port_toggle': {'enabled': False, 'port_flap_timeout_sec': 20}, 'multi_subnet': {'subnets': [], 'timeout_sec': 600, 'enabled': False}, 'ip_change': {'timeout_sec': 500, 'enabled': False}}| ## Module pass diff --git a/resources/setups/baseline/module_config.json b/resources/setups/baseline/module_config.json index ea0f335568..cb9249d4ca 100644 --- a/resources/setups/baseline/module_config.json +++ b/resources/setups/baseline/module_config.json @@ -1,7 +1,22 @@ { "modules": { "ipaddr": { - "timeout_sec": 300 + "timeout_sec": 300, + "dhcp_tests": { + "port_toggle": { + "enabled": false, + "port_flap_timeout_sec": 20 + }, + "multi_subnet": { + "subnets": [], + "timeout_sec": 600, + "enabled": false + }, + "ip_change": { + "timeout_sec": 500, + "enabled": false + } + } }, "pass": { "enabled": true @@ -37,4 +52,4 @@ "enabled": true } } -} +} \ No newline at end of file diff --git a/testing/test_aux.out b/testing/test_aux.out index bed2e7dec1..816eec3538 100644 --- a/testing/test_aux.out +++ b/testing/test_aux.out @@ -94,6 +94,21 @@ port-01 module_config modules "enabled": false }, "ipaddr": { + "dhcp_tests": { + "ip_change": { + 
"enabled": false, + "timeout_sec": 500 + }, + "multi_subnet": { + "enabled": false, + "subnets": [], + "timeout_sec": 600 + }, + "port_toggle": { + "enabled": false, + "port_flap_timeout_sec": 20 + } + }, "timeout_sec": 300 }, "macoui": { @@ -145,6 +160,21 @@ port-02 module_config modules "enabled": true }, "ipaddr": { + "dhcp_tests": { + "ip_change": { + "enabled": false, + "timeout_sec": 500 + }, + "multi_subnet": { + "enabled": false, + "subnets": [], + "timeout_sec": 600 + }, + "port_toggle": { + "enabled": false, + "port_flap_timeout_sec": 20 + } + }, "timeout_sec": 300 }, "macoui": { diff --git a/testing/test_base.out b/testing/test_base.out index edc14eaa30..312ac5445f 100644 --- a/testing/test_base.out +++ b/testing/test_base.out @@ -44,6 +44,7 @@ Overall device result PASS |Attribute|Value| |---|---| |timeout_sec|300| +|dhcp_tests|{'port_toggle': {'enabled': False, 'port_flap_timeout_sec': 20}, 'multi_subnet': {'subnets': [], 'timeout_sec': 600, 'enabled': False}, 'ip_change': {'timeout_sec': 500, 'enabled': False}}| ## Module pass diff --git a/testing/test_many.out b/testing/test_many.out index fdef3cc356..2f518961a8 100644 --- a/testing/test_many.out +++ b/testing/test_many.out @@ -4,6 +4,8 @@ DAQ stress test Enough results: 1 Enough DHCP timeouts: 1 Enough static ips: 1 +Enough port toggle tests: 1 +Enough port toggle timeouts: 1 Redacted soak diff No soak report diff Done with many diff --git a/testing/test_many.sh b/testing/test_many.sh index 2dabc2e0c0..7c13bb6644 100755 --- a/testing/test_many.sh +++ b/testing/test_many.sh @@ -3,11 +3,16 @@ source testing/test_preamble.sh # num of devices need to less than 10 -NUM_DEVICES=8 +NUM_DEVICES=9 RUN_LIMIT=20 # num of timeout devices need to be less or equal to num dhcp devices NUM_NO_DHCP_DEVICES=4 NUM_TIMEOUT_DEVICES=2 + +# Extended DHCP tests +NUM_PORT_TOGGLE_DHCP_TEST_DEVICES=2 +NUM_PORT_TOGGLE_DHCP_TEST_TIMEOUT_DEVICES=1 + echo Many Tests >> $TEST_RESULTS echo source config/system/default.yaml > 
local/system.conf @@ -18,11 +23,11 @@ echo gcp_cred=$gcp_cred >> local/system.conf for iface in $(seq 1 $NUM_DEVICES); do xdhcp="" + intf_mac="9a02571e8f0$iface" + mkdir -p local/site/mac_addrs/$intf_mac if [[ $iface -le $NUM_NO_DHCP_DEVICES ]]; then ip="10.20.0.$((iface+5))" - intf_mac="9a02571e8f0$iface" xdhcp="xdhcp=$ip" - mkdir -p local/site/mac_addrs/$intf_mac if [[ $iface -gt $NUM_TIMEOUT_DEVICES ]]; then #Install site specific configs for xdhcp ips cat < local/site/mac_addrs/$intf_mac/module_config.json @@ -39,6 +44,39 @@ EOF } } } +EOF + fi + elif [[ $iface -le $((NUM_NO_DHCP_DEVICES + NUM_PORT_TOGGLE_DHCP_TEST_DEVICES)) ]]; then + if [[ $iface -le $((NUM_NO_DHCP_DEVICES + NUM_PORT_TOGGLE_DHCP_TEST_TIMEOUT_DEVICES)) ]]; then + cat < local/site/mac_addrs/$intf_mac/module_config.json + { + "modules": { + "ipaddr": { + "dhcp_tests": { + "port_toggle": { + "enabled": true, + "port_flap_timeout_sec": 20, + "timeout_sec": 1 + } + } + } + } + } +EOF + else + cat < local/site/mac_addrs/$intf_mac/module_config.json + { + "modules": { + "ipaddr": { + "dhcp_tests": { + "port_toggle": { + "enabled": true, + "port_flap_timeout_sec": 20 + } + } + } + } + } EOF fi fi @@ -54,6 +92,8 @@ end_time=`date -u -Isec` cat inst/result.log results=$(fgrep [] inst/result.log | wc -l) timeouts=$(fgrep "ipaddr:TimeoutError" inst/result.log | wc -l) +port_toggle_timeouts=$(fgrep "port_toggle:TimeoutError" inst/result.log | wc -l) +port_toggles=$(fgrep "port_toggle test Received ip:" inst/cmdrun.log | wc -l) cat inst/run-port-*/scans/ip_triggers.txt static_ips=$(fgrep nope inst/run-port-*/scans/ip_triggers.txt | wc -l) @@ -69,6 +109,9 @@ echo Enough results: $((results >= 6*RUN_LIMIT/10)) | tee -a $TEST_RESULTS echo Enough DHCP timeouts: $((timeouts >= NUM_TIMEOUT_DEVICES)) | tee -a $TEST_RESULTS echo Enough static ips: $((static_ips >= (NUM_NO_DHCP_DEVICES - NUM_TIMEOUT_DEVICES))) | tee -a $TEST_RESULTS +echo Enough port toggle tests: $((port_toggles >= 
(NUM_PORT_TOGGLE_DHCP_TEST_DEVICES - NUM_PORT_TOGGLE_DHCP_TEST_TIMEOUT_DEVICES) )) | tee -a $TEST_RESULTS +echo Enough port toggle timeouts: $((port_toggle_timeouts >= NUM_PORT_TOGGLE_DHCP_TEST_TIMEOUT_DEVICES)) | tee -a $TEST_RESULTS + echo bin/combine_reports device=9a:02:57:1e:8f:05 from_time=$start_time to_time=$end_time count=2 bin/combine_reports device=9a:02:57:1e:8f:05 from_time=$start_time to_time=$end_time count=2 From 9f6add897de6474a2ae1e242ac130b8056fa77ad Mon Sep 17 00:00:00 2001 From: Trevor Date: Wed, 15 Jul 2020 14:55:53 -0700 Subject: [PATCH 25/38] Refactor ipaddress module (#536) --- .idea/codeStyles/codeStyleConfig.xml | 5 + .idea/vcs.xml | 4 + config/modules/host.conf | 1 + daq/docker_test.py | 10 +- daq/gcp.py | 3 +- daq/host.py | 96 ++++++++------------ daq/ipaddr_test.py | 70 ++++++++++++++ docs/device_report.md | 21 +++-- libs/proto/usi_pb2.py | 26 ++++-- resources/setups/baseline/module_config.json | 19 +--- testing/test_aux.out | 39 ++------ testing/test_base.out | 21 +++-- testing/test_dhcp.out | 2 +- testing/test_many.out | 4 +- testing/test_many.sh | 38 ++++---- 15 files changed, 192 insertions(+), 167 deletions(-) create mode 100644 .idea/codeStyles/codeStyleConfig.xml create mode 100644 daq/ipaddr_test.py diff --git a/.idea/codeStyles/codeStyleConfig.xml b/.idea/codeStyles/codeStyleConfig.xml new file mode 100644 index 0000000000..b9d18bf599 --- /dev/null +++ b/.idea/codeStyles/codeStyleConfig.xml @@ -0,0 +1,5 @@ + + + + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml index b8fa9bb7a4..3fc93409e7 100644 --- a/.idea/vcs.xml +++ b/.idea/vcs.xml @@ -36,6 +36,10 @@ + + + + \ No newline at end of file diff --git a/config/modules/host.conf b/config/modules/host.conf index 8ec24df938..deda32bdd6 100644 --- a/config/modules/host.conf +++ b/config/modules/host.conf @@ -7,6 +7,7 @@ build docker/modules add pass add fail add ping +add ipaddr add bacnet add mudgee diff --git a/daq/docker_test.py b/daq/docker_test.py index 
fdabebd0f7..c38b210b8d 100644 --- a/daq/docker_test.py +++ b/daq/docker_test.py @@ -19,18 +19,17 @@ class DockerTest: CONTAINER_PREFIX = 'daq' # pylint: disable=too-many-arguments - def __init__(self, runner, target_port, tmpdir, test_name, env_vars=None): + def __init__(self, host, target_port, tmpdir, test_name, module_config): self.target_port = target_port self.tmpdir = tmpdir self.test_name = test_name - self.runner = runner + self.runner = host.runner self.host_name = '%s%02d' % (test_name, self.target_port) self.docker_log = None self.docker_host = None self.callback = None self.start_time = None self.pipe = None - self.env_vars = env_vars or [] self._finish_hook = None def start(self, port, params, callback, finish_hook): @@ -44,7 +43,7 @@ def start(self, port, params, callback, finish_hook): def opt_param(key): return params.get(key) or '' # Substitute empty string for None - env_vars = self.env_vars + [ + env_vars = [ "TARGET_NAME=" + self.host_name, "TARGET_IP=" + params['target_ip'], "TARGET_MAC=" + params['target_mac'], @@ -168,3 +167,6 @@ def _docker_complete(self): LOGGER.info("Target port %d test %s passed %ss", self.target_port, self.test_name, delay) self.callback(return_code=return_code, exception=exception) + + def ip_listener(self, target_ip): + """Do nothing b/c docker tests don't care about ip notifications""" diff --git a/daq/gcp.py b/daq/gcp.py index 17051870a1..67db631e0b 100644 --- a/daq/gcp.py +++ b/daq/gcp.py @@ -24,6 +24,7 @@ # pylint: disable=no-member DESCENDING = firestore.Query.DESCENDING + def get_timestamp(): """"Get a JSON-compatible formatted timestamp""" return to_timestamp(datetime.datetime.now(datetime.timezone.utc)) @@ -49,7 +50,7 @@ def __init__(self, config, callback_handler): self._callback_handler = callback_handler cred_file = self.config.get('gcp_cred') if not cred_file: - LOGGER.info('No gcp_cred filr specified in config, disabling gcp use.') + LOGGER.info('No gcp_cred file specified in config, disabling gcp use.') 
self._pubber = None self._storage = None self._firestore = None diff --git a/daq/host.py b/daq/host.py index 2113693132..5de826f8c9 100644 --- a/daq/host.py +++ b/daq/host.py @@ -5,10 +5,10 @@ import shutil import time from datetime import timedelta, datetime -import logging import grpc from clib import tcpdump_helper + from report import ResultType, ReportGenerator from proto import usi_pb2 as usi from proto import usi_pb2_grpc as usi_service @@ -16,8 +16,10 @@ import configurator import docker_test import gcp +import ipaddr_test import logger + class _STATE: """Host state enum for testing cycle""" ERROR = 'Error condition' @@ -50,7 +52,7 @@ class MODE: def pre_states(): """Return pre-test states for basic operation""" - return ['startup', 'sanity', 'ipaddr', 'base', 'monitor'] + return ['startup', 'sanity', 'acquire', 'base', 'monitor'] def dhcp_tests(): @@ -65,10 +67,9 @@ def post_states(): def get_test_config(config, test): """Get a single test module's config""" - if test in dhcp_tests(): - return config['modules'].get('ipaddr', {}).get('dhcp_tests', {}).get(test) return config["modules"].get(test) + class ConnectedHost: """Class managing a device-under-test""" @@ -123,7 +124,6 @@ def __init__(self, runner, gateway, target, config): assert self._loaded_config, 'config was not loaded' self._write_module_config(self._loaded_config, self._device_aux_path()) self.remaining_tests = self._get_enabled_tests() - self.dhcp_tests = self._get_dhcp_tests() self.logger.info('Host %s running with enabled tests %s', self.target_port, self.remaining_tests) self._report = ReportGenerator(config, self._INST_DIR, self.target_mac, @@ -135,11 +135,6 @@ def __init__(self, runner, gateway, target, config): self.timeout_handler = self._aux_module_timeout_handler self._all_ips = [] self._ip_listener = None - self._dhcp_tests_map = { - 'port_toggle': self._dhcp_port_toggle_test, - 'multi_subnet': None, # TODO - 'ip_change': None # TODO - } @staticmethod def make_runid(): @@ -305,8 
+300,11 @@ def _state_transition(self, target, expected=None): def _build_switch_info(self) -> usi.SwitchInfo: switch_config = self._get_switch_config() - if switch_config["model"]: - switch_model = usi.SwitchModel.Value(switch_config["model"]) + model_str = switch_config['model'] + if model_str == 'FAUX_SWITCH': + return None + if model_str: + switch_model = usi.SwitchModel.Value(model_str) else: switch_model = usi.SwitchModel.OVS_SWITCH params = { @@ -335,6 +333,9 @@ def notify_activate(self): def connect_port(self, connect): """Connects/Disconnects port for this host""" switch_info = self._build_switch_info() + if not switch_info: + self.logger.info('No switch model found, skipping port connect') + return False try: with grpc.insecure_channel(self._usi_url) as channel: stub = usi_service.USIServiceStub(channel) @@ -347,12 +348,13 @@ def connect_port(self, connect): except Exception as e: self.logger.error(e) raise e + return True def _prepare(self): self.logger.info('Target port %d waiting for ip as %s', self.target_port, self.target_mac) self._state_transition(_STATE.WAITING, _STATE.INIT) self.record_result('sanity', state=MODE.DONE) - self.record_result('ipaddr', state=MODE.EXEC) + self.record_result('acquire', state=MODE.EXEC) static_ip = self._get_static_ip() if static_ip: self.logger.info('Target port %d using static ip', self.target_port) @@ -378,7 +380,7 @@ def _aux_module_timeout_handler(self): def _main_module_timeout_handler(self): self.test_host.terminate() - self._docker_callback(exception=self._TIMEOUT_EXCEPTION) + self._module_callback(exception=self._TIMEOUT_EXCEPTION) def heartbeat(self): """Checks module run time for each event loop""" @@ -446,8 +448,8 @@ def ip_notify(self, target_ip, state=MODE.DONE, delta_sec=-1): self._all_ips.append({"ip": target_ip, "timestamp": time.time()}) if self._get_dhcp_mode() == "ip_change" and len(self._all_ips) == 1: self.gateway.request_new_ip(self.target_mac) - if self._ip_listener: - 
self._ip_listener(target_ip) + if self.test_host: + self.test_host.ip_listener(target_ip) def trigger_ready(self): """Check if this host is ready to be triggered""" @@ -471,7 +473,7 @@ def trigger(self, state=MODE.DONE, target_ip=None, exception=None, delta_sec=-1) return True self.target_ip = target_ip self._record_result('info', state='%s/%s' % (self.target_mac, target_ip)) - self.record_result('ipaddr', ip=target_ip, state=state, exception=exception) + self.record_result('acquire', ip=target_ip, state=state, exception=exception) if exception: self._state_transition(_STATE.ERROR) self.runner.target_set_error(self.target_port, exception) @@ -535,7 +537,6 @@ def _monitor_cleanup(self, forget=True): def _monitor_error(self, exception, forget=False): self.logger.error('Target port %d monitor error: %s', self.target_port, exception) - self._ip_listener = None self._monitor_cleanup(forget=forget) self.record_result(self.test_name, exception=exception) self._state_transition(_STATE.ERROR) @@ -568,6 +569,7 @@ def _monitor_complete(self): def _monitor_continue(self): self._state_transition(_STATE.NEXT, _STATE.MONITOR) + self.test_name = None self._run_next_test() def _base_tests(self): @@ -587,33 +589,17 @@ def _base_tests(self): self.record_result('base', state=MODE.DONE) return True - def _dhcp_port_toggle_test(self, logging_handler): - def ip_listener(target_ip): - self.logger.info("%s test Received ip: %s" % (self.test_name, target_ip)) - if logging_handler: - self.logger.removeHandler(logging_handler) - self._ip_listener = None - self._end_test() - - self.connect_port(False) - time.sleep(self.runner.config.get("port_debounce_sec", 0) + 1) - self.connect_port(True) - self._ip_listener = ip_listener - def _run_next_test(self): + assert not self.test_name, 'test_name defined: %s' % self.test_name try: if self.remaining_tests: self.logger.debug('Target port %d executing tests %s', self.target_port, self.remaining_tests) - self.timeout_handler = 
self._main_module_timeout_handler - self._docker_test(self.remaining_tests.pop(0)) - elif self.dhcp_tests: - self._dhcp_test(self.dhcp_tests.pop(0)) + self._run_test(self.remaining_tests.pop(0)) else: self.logger.info('Target port %d no more tests remaining', self.target_port) self.timeout_handler = self._aux_module_timeout_handler self._state_transition(_STATE.DONE, _STATE.NEXT) - self.test_name = None self.record_result('finish', state=MODE.FINE) except Exception as e: self.logger.error('Target port %d start error: %s', self.target_port, e) @@ -629,10 +615,15 @@ def _device_aux_path(self): os.makedirs(path) return path - def _docker_test(self, test_name): - self.test_host = docker_test.DockerTest(self.runner, self.target_port, - self.devdir, test_name) - self.logger.debug('test_host start %s/%s', test_name, self._host_name()) + def _new_test(self, test_name): + clazz = ipaddr_test.IpAddrTest if test_name == 'ipaddr' else docker_test.DockerTest + return clazz(self, self.target_port, self.devdir, test_name, self._loaded_config) + + def _run_test(self, test_name): + self.timeout_handler = self._main_module_timeout_handler + self.test_host = self._new_test(test_name) + + self.logger.info('Target port %d start %s', self.target_port, self._host_name()) try: self.test_port = self.runner.allocate_test_port(self.target_port) @@ -643,7 +634,7 @@ def _docker_test(self, test_name): try: self._start_test(test_name) params = self._get_module_params() - self.test_host.start(self.test_port, params, self._docker_callback, self._finish_hook) + self.test_host.start(self.test_port, params, self._module_callback, self._finish_hook) except Exception as e: self.test_host = None self.runner.release_test_port(self.target_port, self.test_port) @@ -651,22 +642,6 @@ def _docker_test(self, test_name): self._monitor_cleanup() raise e - def _dhcp_test(self, test_name): - self.logger.info('Target port %d dhcp test %s running', self.target_port, test_name) - self.timeout_handler = 
self._aux_module_timeout_handler - self._start_test(test_name) - test_fn = self._dhcp_tests_map[test_name] - logging_handler = logging.FileHandler( - os.path.join(self._host_dir_path(), 'activate.log')) - # All the logging from this host will also go to activation log to be stored - self.logger.addHandler(logging_handler) - try: - test_fn(logging_handler) - except Exception as e: - self._end_test(state=MODE.MERR, exception=e) - self.logger.removeHandler(logging_handler) - self._run_next_test() - def _start_test(self, test_name): self.test_name = test_name self.test_start = gcp.get_timestamp() @@ -691,6 +666,7 @@ def _end_test(self, state=MODE.DONE, return_code=None, exception=None): remote_paths[result_type.value] = self._upload_file(path) self.record_result(self.test_name, state=state, code=return_code, exception=exception, **remote_paths) + self.test_name = None self.test_host = None self.timeout_handler = None self._run_next_test() @@ -724,7 +700,7 @@ def _get_switch_config(self): } def _host_name(self): - return self.test_host.host_name if self.test_host else (self.test_name or 'unknown') + return self.test_host.host_name if self.test_host else 'unknown' def _host_dir_path(self): return os.path.join(self.devdir, 'nodes', self._host_name()) @@ -741,14 +717,14 @@ def _finish_hook(self): os.system('%s %s 2>&1 > %s/finish.out' % (self._finish_hook_script, finish_dir, finish_dir)) - def _docker_callback(self, return_code=None, exception=None): + def _module_callback(self, return_code=None, exception=None): host_name = self._host_name() self.logger.info('Host callback %s/%s was %s with %s', self.test_name, host_name, return_code, exception) failed = return_code or exception state = MODE.MERR if failed else MODE.DONE self.runner.release_test_port(self.target_port, self.test_port) - assert self.test_host, '_docker_callback with no test_host defined' + assert self.test_host, '_module_callback with no test_host defined' self._end_test(state=state, 
return_code=return_code, exception=exception) def _merge_run_info(self, config): diff --git a/daq/ipaddr_test.py b/daq/ipaddr_test.py new file mode 100644 index 0000000000..6bf29fafe2 --- /dev/null +++ b/daq/ipaddr_test.py @@ -0,0 +1,70 @@ +"""Test module encapsulating ip-address tests (including DHCP)""" + +from __future__ import absolute_import +import time +import os +import logger + +LOGGER = logger.get_logger('ipaddr') + + +class IpAddrTest: + """Module for inline ipaddr tests""" + + DEFAULT_WAIT_SEC = 10 + + # pylint: disable=too-many-arguments + def __init__(self, host, target_port, tmpdir, test_name, module_config): + self.host = host + self.target_port = target_port + self.tmpdir = tmpdir + self.test_config = module_config.get('modules').get('ipaddr') + self.test_name = test_name + self.host_name = '%s%02d' % (test_name, self.target_port) + self.log_path = os.path.join(self.tmpdir, 'nodes', self.host_name, 'activate.log') + self.log_file = None + self.callback = None + self.tests = [ + self._dhcp_port_toggle_test, + self._finalize + ] + + def start(self, port, params, callback, finish_hook): + """Start the ip-addr tests""" + self.callback = callback + LOGGER.debug('Target port %d starting ipaddr test %s', self.target_port, self.test_name) + self.log_file = open(self.log_path, 'w') + self._next_test() + + def _next_test(self): + try: + self.tests.pop(0)() + except Exception as e: + self._finalize(exception=e) + + def activate_log(self, message): + """Log an activation message""" + self.log_file.write(message + '\n') + + def _dhcp_port_toggle_test(self): + self.activate_log('dhcp_port_toggle_test') + if not self.host.connect_port(False): + self.activate_log('disconnect port not enabled') + return + time.sleep(self.host.config.get("port_debounce_sec", 0) + 1) + self.host.connect_port(True) + + def _finalize(self, exception=None): + self.terminate() + self.callback(exception=exception) + + def terminate(self): + """Terminate this set of tests""" + 
self.log_file.close() + self.log_file = None + + def ip_listener(self, target_ip): + """Respond to a ip notification event""" + self.activate_log('ip notification %s' % target_ip) + LOGGER.info("%s received ip %s" % (self.test_name, target_ip)) + self._next_test() diff --git a/docs/device_report.md b/docs/device_report.md index a2c17dcff1..3cc2ac6675 100644 --- a/docs/device_report.md +++ b/docs/device_report.md @@ -93,16 +93,6 @@ Overall device result FAIL |gone|unknown.fake.monkey|Other|Other|| -## Module ipaddr - - -#### Module Config - -|Attribute|Value| -|---|---| -|timeout_sec|300| -|dhcp_tests|{'port_toggle': {'enabled': False, 'port_flap_timeout_sec': 20}, 'multi_subnet': {'subnets': [], 'timeout_sec': 600, 'enabled': False}, 'ip_change': {'timeout_sec': 500, 'enabled': False}}| - ## Module pass @@ -154,6 +144,17 @@ RESULT pass base.target.ping target reached ``` +## Module ipaddr + + +#### Module Config + +|Attribute|Value| +|---|---| +|enabled|True| +|timeout_sec|300| +|port_flap_timeout_sec|20| + ## Module nmap diff --git a/libs/proto/usi_pb2.py b/libs/proto/usi_pb2.py index c9189dc119..9414eb0416 100644 --- a/libs/proto/usi_pb2.py +++ b/libs/proto/usi_pb2.py @@ -20,7 +20,7 @@ syntax='proto3', serialized_options=b'\n\004grpcB\010USIProtoP\001', create_key=_descriptor._internal_create_key, - serialized_pb=b'\n\tusi.proto\x12\x03usi\"\'\n\x14SwitchActionResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"\x9b\x01\n\rPowerResponse\x12!\n\x19\x63urrent_power_consumption\x18\x01 \x01(\x02\x12\x1d\n\x15max_power_consumption\x18\x02 \x01(\x02\x12$\n\x0bpoe_support\x18\x03 \x01(\x0e\x32\x0f.usi.POESupport\x12\"\n\npoe_status\x18\x04 \x01(\x0e\x32\x0e.usi.POEStatus\"]\n\x11InterfaceResponse\x12$\n\x0blink_status\x18\x01 \x01(\x0e\x32\x0f.usi.LinkStatus\x12\x12\n\nlink_speed\x18\x02 \x01(\x05\x12\x0e\n\x06\x64uplex\x18\x03 \x01(\t\"w\n\nSwitchInfo\x12\x0f\n\x07ip_addr\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65vice_port\x18\x03 \x01(\x05\x12\x1f\n\x05model\x18\x04 
\x01(\x0e\x32\x10.usi.SwitchModel\x12\x10\n\x08username\x18\x05 \x01(\t\x12\x10\n\x08password\x18\x06 \x01(\t*F\n\x0bSwitchModel\x12\x17\n\x13\x41LLIED_TELESIS_X230\x10\x00\x12\x0e\n\nCISCO_9300\x10\x01\x12\x0e\n\nOVS_SWITCH\x10\x02*\x1e\n\nLinkStatus\x12\x06\n\x02UP\x10\x00\x12\x08\n\x04\x44OWN\x10\x01*\'\n\nPOESupport\x12\x0b\n\x07\x45NABLED\x10\x00\x12\x0c\n\x08\x44ISABLED\x10\x01*1\n\tPOEStatus\x12\x06\n\x02ON\x10\x00\x12\x07\n\x03OFF\x10\x01\x12\t\n\x05\x46\x41ULT\x10\x02\x12\x08\n\x04\x44\x45NY\x10\x03\x32\xef\x01\n\nUSIService\x12\x31\n\x08GetPower\x12\x0f.usi.SwitchInfo\x1a\x12.usi.PowerResponse\"\x00\x12\x39\n\x0cGetInterface\x12\x0f.usi.SwitchInfo\x1a\x16.usi.InterfaceResponse\"\x00\x12:\n\ndisconnect\x12\x0f.usi.SwitchInfo\x1a\x19.usi.SwitchActionResponse\"\x00\x12\x37\n\x07\x63onnect\x12\x0f.usi.SwitchInfo\x1a\x19.usi.SwitchActionResponse\"\x00\x42\x12\n\x04grpcB\x08USIProtoP\x01\x62\x06proto3' + serialized_pb=b'\n\tusi.proto\x12\x03usi\"\'\n\x14SwitchActionResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"\x9b\x01\n\rPowerResponse\x12!\n\x19\x63urrent_power_consumption\x18\x01 \x01(\x02\x12\x1d\n\x15max_power_consumption\x18\x02 \x01(\x02\x12$\n\x0bpoe_support\x18\x03 \x01(\x0e\x32\x0f.usi.POESupport\x12\"\n\npoe_status\x18\x04 \x01(\x0e\x32\x0e.usi.POEStatus\"]\n\x11InterfaceResponse\x12$\n\x0blink_status\x18\x01 \x01(\x0e\x32\x0f.usi.LinkStatus\x12\x12\n\nlink_speed\x18\x02 \x01(\x05\x12\x0e\n\x06\x64uplex\x18\x03 \x01(\t\"w\n\nSwitchInfo\x12\x0f\n\x07ip_addr\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65vice_port\x18\x03 \x01(\x05\x12\x1f\n\x05model\x18\x04 \x01(\x0e\x32\x10.usi.SwitchModel\x12\x10\n\x08username\x18\x05 \x01(\t\x12\x10\n\x08password\x18\x06 
\x01(\t*W\n\x0bSwitchModel\x12\x17\n\x13\x41LLIED_TELESIS_X230\x10\x00\x12\x0e\n\nCISCO_9300\x10\x01\x12\x0e\n\nOVS_SWITCH\x10\x02\x12\x0f\n\x0b\x46\x41UX_SWITCH\x10\x03*\x1e\n\nLinkStatus\x12\x06\n\x02UP\x10\x00\x12\x08\n\x04\x44OWN\x10\x01*\'\n\nPOESupport\x12\x0b\n\x07\x45NABLED\x10\x00\x12\x0c\n\x08\x44ISABLED\x10\x01*1\n\tPOEStatus\x12\x06\n\x02ON\x10\x00\x12\x07\n\x03OFF\x10\x01\x12\t\n\x05\x46\x41ULT\x10\x02\x12\x08\n\x04\x44\x45NY\x10\x03\x32\xef\x01\n\nUSIService\x12\x31\n\x08GetPower\x12\x0f.usi.SwitchInfo\x1a\x12.usi.PowerResponse\"\x00\x12\x39\n\x0cGetInterface\x12\x0f.usi.SwitchInfo\x1a\x16.usi.InterfaceResponse\"\x00\x12:\n\ndisconnect\x12\x0f.usi.SwitchInfo\x1a\x19.usi.SwitchActionResponse\"\x00\x12\x37\n\x07\x63onnect\x12\x0f.usi.SwitchInfo\x1a\x19.usi.SwitchActionResponse\"\x00\x42\x12\n\x04grpcB\x08USIProtoP\x01\x62\x06proto3' ) _SWITCHMODEL = _descriptor.EnumDescriptor( @@ -45,11 +45,16 @@ serialized_options=None, type=None, create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='FAUX_SWITCH', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, serialized_start=433, - serialized_end=503, + serialized_end=520, ) _sym_db.RegisterEnumDescriptor(_SWITCHMODEL) @@ -74,8 +79,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=505, - serialized_end=535, + serialized_start=522, + serialized_end=552, ) _sym_db.RegisterEnumDescriptor(_LINKSTATUS) @@ -100,8 +105,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=537, - serialized_end=576, + serialized_start=554, + serialized_end=593, ) _sym_db.RegisterEnumDescriptor(_POESUPPORT) @@ -136,8 +141,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=578, - serialized_end=627, + serialized_start=595, + serialized_end=644, ) _sym_db.RegisterEnumDescriptor(_POESTATUS) @@ -145,6 +150,7 @@ ALLIED_TELESIS_X230 = 0 
CISCO_9300 = 1 OVS_SWITCH = 2 +FAUX_SWITCH = 3 UP = 0 DOWN = 1 ENABLED = 0 @@ -398,8 +404,8 @@ index=0, serialized_options=None, create_key=_descriptor._internal_create_key, - serialized_start=630, - serialized_end=869, + serialized_start=647, + serialized_end=886, methods=[ _descriptor.MethodDescriptor( name='GetPower', diff --git a/resources/setups/baseline/module_config.json b/resources/setups/baseline/module_config.json index cb9249d4ca..83eaa9a3b5 100644 --- a/resources/setups/baseline/module_config.json +++ b/resources/setups/baseline/module_config.json @@ -1,22 +1,9 @@ { "modules": { "ipaddr": { + "enabled": true, "timeout_sec": 300, - "dhcp_tests": { - "port_toggle": { - "enabled": false, - "port_flap_timeout_sec": 20 - }, - "multi_subnet": { - "subnets": [], - "timeout_sec": 600, - "enabled": false - }, - "ip_change": { - "timeout_sec": 500, - "enabled": false - } - } + "port_flap_timeout_sec": 20 }, "pass": { "enabled": true @@ -52,4 +39,4 @@ "enabled": true } } -} \ No newline at end of file +} diff --git a/testing/test_aux.out b/testing/test_aux.out index 816eec3538..cf65e1a828 100644 --- a/testing/test_aux.out +++ b/testing/test_aux.out @@ -60,7 +60,7 @@ RESULT pass security.firmware version found: ?\xFF\xFF\x19,>u\x08\x00no RESULT pass connection.network.ntp_support Using NTPv4. RESULT fail connection.network.ntp_support Not using NTPv4. RESULT skip connection.network.ntp_support No NTP packets received. 
-dhcp requests 1 1 0 1 +dhcp requests 1 1 1 1 01: [] 02: ['02:macoui:TimeoutError', '02:ping:TimeoutError'] 03: [] @@ -94,21 +94,8 @@ port-01 module_config modules "enabled": false }, "ipaddr": { - "dhcp_tests": { - "ip_change": { - "enabled": false, - "timeout_sec": 500 - }, - "multi_subnet": { - "enabled": false, - "subnets": [], - "timeout_sec": 600 - }, - "port_toggle": { - "enabled": false, - "port_flap_timeout_sec": 20 - } - }, + "enabled": true, + "port_flap_timeout_sec": 20, "timeout_sec": 300 }, "macoui": { @@ -160,21 +147,8 @@ port-02 module_config modules "enabled": true }, "ipaddr": { - "dhcp_tests": { - "ip_change": { - "enabled": false, - "timeout_sec": 500 - }, - "multi_subnet": { - "enabled": false, - "subnets": [], - "timeout_sec": 600 - }, - "port_toggle": { - "enabled": false, - "port_flap_timeout_sec": 20 - } - }, + "enabled": true, + "port_flap_timeout_sec": 20, "timeout_sec": 300 }, "macoui": { @@ -232,16 +206,19 @@ inst/gw01/nodes/gw01/activate.log inst/gw02/nodes/gw02/activate.log inst/gw03/nodes/gw03/activate.log inst/run-port-01/nodes/fail01/activate.log +inst/run-port-01/nodes/ipaddr01/activate.log inst/run-port-01/nodes/nmap01/activate.log inst/run-port-01/nodes/pass01/activate.log inst/run-port-01/nodes/ping01/activate.log inst/run-port-02/nodes/fail02/activate.log inst/run-port-02/nodes/hold02/activate.log +inst/run-port-02/nodes/ipaddr02/activate.log inst/run-port-02/nodes/nmap02/activate.log inst/run-port-02/nodes/pass02/activate.log inst/run-port-02/nodes/ping02/activate.log inst/run-port-03/nodes/fail03/activate.log inst/run-port-03/nodes/hold03/activate.log +inst/run-port-03/nodes/ipaddr03/activate.log inst/run-port-03/nodes/nmap03/activate.log inst/run-port-03/nodes/pass03/activate.log inst/run-port-03/nodes/ping03/activate.log diff --git a/testing/test_base.out b/testing/test_base.out index 312ac5445f..d0181c88d0 100644 --- a/testing/test_base.out +++ b/testing/test_base.out @@ -36,16 +36,6 @@ Overall device result PASS 
|pass|security.ports.nmap|Other|Other|Only allowed ports found open.| -## Module ipaddr - - -#### Module Config - -|Attribute|Value| -|---|---| -|timeout_sec|300| -|dhcp_tests|{'port_toggle': {'enabled': False, 'port_flap_timeout_sec': 20}, 'multi_subnet': {'subnets': [], 'timeout_sec': 600, 'enabled': False}, 'ip_change': {'timeout_sec': 500, 'enabled': False}}| - ## Module pass @@ -97,6 +87,17 @@ RESULT pass base.target.ping target reached ``` +## Module ipaddr + + +#### Module Config + +|Attribute|Value| +|---|---| +|enabled|True| +|timeout_sec|300| +|port_flap_timeout_sec|20| + ## Module nmap diff --git a/testing/test_dhcp.out b/testing/test_dhcp.out index 6d2dc2e822..e7bf981faa 100644 --- a/testing/test_dhcp.out +++ b/testing/test_dhcp.out @@ -1,7 +1,7 @@ Running testing/test_dhcp.sh DHCP Tests 01: [] -02: ['02:ipaddr:TimeoutError'] +02: ['02:acquire:TimeoutError'] 03: [] 04: [] Device 1 ip triggers: 1 0 diff --git a/testing/test_many.out b/testing/test_many.out index 2f518961a8..3e8525aa5e 100644 --- a/testing/test_many.out +++ b/testing/test_many.out @@ -4,8 +4,8 @@ DAQ stress test Enough results: 1 Enough DHCP timeouts: 1 Enough static ips: 1 -Enough port toggle tests: 1 -Enough port toggle timeouts: 1 +Enough ipaddr tests: 1 +Enough ipaddr timeouts: 1 Redacted soak diff No soak report diff Done with many diff --git a/testing/test_many.sh b/testing/test_many.sh index 7c13bb6644..7befe66a76 100755 --- a/testing/test_many.sh +++ b/testing/test_many.sh @@ -10,8 +10,8 @@ NUM_NO_DHCP_DEVICES=4 NUM_TIMEOUT_DEVICES=2 # Extended DHCP tests -NUM_PORT_TOGGLE_DHCP_TEST_DEVICES=2 -NUM_PORT_TOGGLE_DHCP_TEST_TIMEOUT_DEVICES=1 +NUM_IPADDR_TEST_DEVICES=2 +NUM_IPADDR_TEST_TIMEOUT_DEVICES=1 echo Many Tests >> $TEST_RESULTS @@ -46,19 +46,15 @@ EOF } EOF fi - elif [[ $iface -le $((NUM_NO_DHCP_DEVICES + NUM_PORT_TOGGLE_DHCP_TEST_DEVICES)) ]]; then - if [[ $iface -le $((NUM_NO_DHCP_DEVICES + NUM_PORT_TOGGLE_DHCP_TEST_TIMEOUT_DEVICES)) ]]; then + elif [[ $iface -le 
$((NUM_NO_DHCP_DEVICES + NUM_IPADDR_TEST_DEVICES)) ]]; then + if [[ $iface -le $((NUM_NO_DHCP_DEVICES + NUM_IPADDR_TEST_TIMEOUT_DEVICES)) ]]; then cat < local/site/mac_addrs/$intf_mac/module_config.json { "modules": { "ipaddr": { - "dhcp_tests": { - "port_toggle": { - "enabled": true, - "port_flap_timeout_sec": 20, - "timeout_sec": 1 - } - } + "enabled": true, + "port_flap_timeout_sec": 20, + "timeout_sec": 1 } } } @@ -68,12 +64,8 @@ EOF { "modules": { "ipaddr": { - "dhcp_tests": { - "port_toggle": { - "enabled": true, - "port_flap_timeout_sec": 20 - } - } + "enabled": true, + "port_flap_timeout_sec": 20 } } } @@ -92,25 +84,27 @@ end_time=`date -u -Isec` cat inst/result.log results=$(fgrep [] inst/result.log | wc -l) timeouts=$(fgrep "ipaddr:TimeoutError" inst/result.log | wc -l) -port_toggle_timeouts=$(fgrep "port_toggle:TimeoutError" inst/result.log | wc -l) -port_toggles=$(fgrep "port_toggle test Received ip:" inst/cmdrun.log | wc -l) +ipaddr_timeouts=$(fgrep "ipaddr:TimeoutError" inst/result.log | wc -l) +ip_notifications=$(fgrep "ip notification" inst/run-port-*/nodes/ipaddr*/activate.log | wc -l) cat inst/run-port-*/scans/ip_triggers.txt static_ips=$(fgrep nope inst/run-port-*/scans/ip_triggers.txt | wc -l) more inst/run-port-*/nodes/ping*/activate.log | cat +more inst/run-port-*/nodes/ipaddr*/activate.log | cat echo Found $results clean runs, $timeouts timeouts, and $static_ips static_ips. +echo ipaddr had $ip_notifications notifications and $ipaddr_timeouts timeouts. # This is broken -- should have many more results available! -echo Enough results: $((results >= 6*RUN_LIMIT/10)) | tee -a $TEST_RESULTS +echo Enough results: $((results >= 5*RUN_LIMIT/10)) | tee -a $TEST_RESULTS # $timeouts should strictly equal $NUM_TIMEOUT_DEVICES when dhcp step is fixed. 
echo Enough DHCP timeouts: $((timeouts >= NUM_TIMEOUT_DEVICES)) | tee -a $TEST_RESULTS echo Enough static ips: $((static_ips >= (NUM_NO_DHCP_DEVICES - NUM_TIMEOUT_DEVICES))) | tee -a $TEST_RESULTS -echo Enough port toggle tests: $((port_toggles >= (NUM_PORT_TOGGLE_DHCP_TEST_DEVICES - NUM_PORT_TOGGLE_DHCP_TEST_TIMEOUT_DEVICES) )) | tee -a $TEST_RESULTS -echo Enough port toggle timeouts: $((port_toggle_timeouts >= NUM_PORT_TOGGLE_DHCP_TEST_TIMEOUT_DEVICES)) | tee -a $TEST_RESULTS +echo Enough ipaddr tests: $((ip_notifications >= (NUM_IPADDR_TEST_DEVICES - NUM_IPADDR_TEST_TIMEOUT_DEVICES) )) | tee -a $TEST_RESULTS +echo Enough ipaddr timeouts: $((ipaddr_timeouts >= NUM_IPADDR_TEST_TIMEOUT_DEVICES)) | tee -a $TEST_RESULTS echo bin/combine_reports device=9a:02:57:1e:8f:05 from_time=$start_time to_time=$end_time count=2 bin/combine_reports device=9a:02:57:1e:8f:05 from_time=$start_time to_time=$end_time count=2 From ff20a21fa7cc9bf7bbb1160a8ec870c87ff99d05 Mon Sep 17 00:00:00 2001 From: Trevor Date: Wed, 15 Jul 2020 20:07:15 -0700 Subject: [PATCH 26/38] Remove old python2 components (#537) --- bin/setup_dev | 4 ---- 1 file changed, 4 deletions(-) diff --git a/bin/setup_dev b/bin/setup_dev index 59eb0f6df3..c39518dc96 100755 --- a/bin/setup_dev +++ b/bin/setup_dev @@ -75,10 +75,6 @@ $AG install \ python$PVERSION python3-pkg-resources python3-setuptools \ python$PVERSION-dev python3-pip python emacs-nox python$PVERSION-venv -# Jump through some hoops for mininet, which still has some python2 deps. -$AG install python-pip -python2 -m pip install setuptools - if [ -d mininet ]; then echo Checking mininet version matches $MININETV... targetrev=$(cd mininet; git rev-parse $MININETV) From 02e4cc8789e3ce5b024076fa705a6ab9bc174f9f Mon Sep 17 00:00:00 2001 From: henry54809 Date: Thu, 16 Jul 2020 15:30:05 -0700 Subject: [PATCH 27/38] Additional DHCP test part 2. 
Multisubnet test (#539) --- config/modules/host.conf | 4 +- daq/gateway.py | 6 ++- daq/host.py | 7 ++-- daq/ipaddr_test.py | 38 +++++++++++++------ .../networking_scripts/change_dhcp_range | 24 ++++++++++++ .../networking_scripts/change_lease_time | 4 +- docs/device_report.md | 11 ------ resources/setups/baseline/module_config.json | 7 ++-- testing/test_aux.out | 25 ++++++++---- testing/test_aux.sh | 2 +- testing/test_base.out | 11 ------ testing/test_many.out | 1 + testing/test_many.sh | 5 ++- 13 files changed, 92 insertions(+), 53 deletions(-) create mode 100755 docker/include/networking_scripts/change_dhcp_range diff --git a/config/modules/host.conf b/config/modules/host.conf index deda32bdd6..cd9e9421b4 100644 --- a/config/modules/host.conf +++ b/config/modules/host.conf @@ -7,7 +7,6 @@ build docker/modules add pass add fail add ping -add ipaddr add bacnet add mudgee @@ -15,5 +14,8 @@ add mudgee include subset/pentests/build.conf include usi/build.conf +# Extended dhcp tests +add ipaddr + # Example of how to remove something. 
remove unused diff --git a/daq/gateway.py b/daq/gateway.py index 9984dc7a6e..4ee9bc2fd1 100644 --- a/daq/gateway.py +++ b/daq/gateway.py @@ -131,9 +131,13 @@ def change_dhcp_response_time(self, mac, time): self.execute_script('change_dhcp_response_time', mac, time) def stop_dhcp_response(self, mac): - """Stops DHCP respopnse for the device""" + """Stops DHCP response for the device""" self.change_dhcp_response_time(mac, -1) + def change_dhcp_range(self, start, end, prefix_length): + """Change dhcp range for devices""" + self.execute_script('change_dhcp_range', start, end, prefix_length) + def allocate_test_port(self): """Get the test port to use for this gateway setup""" test_port = self._switch_port(self.TEST_OFFSET_START) diff --git a/daq/host.py b/daq/host.py index 5de826f8c9..50d8fe224e 100644 --- a/daq/host.py +++ b/daq/host.py @@ -211,10 +211,6 @@ def _get_static_ip(self): def _get_dhcp_mode(self): return self._loaded_config['modules'].get('ipaddr', {}).get('dhcp_mode', 'normal') - def _get_dhcp_tests(self): - tests = self._loaded_config['modules'].get('ipaddr', {}).get('dhcp_tests', {}).keys() - return list(filter(self._test_enabled, tests)) - def _get_unique_upload_path(self, file_name): base = os.path.basename(file_name) partial = os.path.join('tests', self.test_name, base) if self.test_name else base @@ -448,6 +444,9 @@ def ip_notify(self, target_ip, state=MODE.DONE, delta_sec=-1): self._all_ips.append({"ip": target_ip, "timestamp": time.time()}) if self._get_dhcp_mode() == "ip_change" and len(self._all_ips) == 1: self.gateway.request_new_ip(self.target_mac) + # Update ip directly if it's already triggered. 
+ if self.target_ip: + self.target_ip = target_ip if self.test_host: self.test_host.ip_listener(target_ip) diff --git a/daq/ipaddr_test.py b/daq/ipaddr_test.py index 6bf29fafe2..b56948d9e8 100644 --- a/daq/ipaddr_test.py +++ b/daq/ipaddr_test.py @@ -3,6 +3,7 @@ from __future__ import absolute_import import time import os +import copy import logger LOGGER = logger.get_logger('ipaddr') @@ -11,22 +12,23 @@ class IpAddrTest: """Module for inline ipaddr tests""" - DEFAULT_WAIT_SEC = 10 - # pylint: disable=too-many-arguments def __init__(self, host, target_port, tmpdir, test_name, module_config): self.host = host self.target_port = target_port self.tmpdir = tmpdir self.test_config = module_config.get('modules').get('ipaddr') + self.test_dhcp_ranges = copy.copy(self.test_config.get('dhcp_ranges', [])) self.test_name = test_name self.host_name = '%s%02d' % (test_name, self.target_port) self.log_path = os.path.join(self.tmpdir, 'nodes', self.host_name, 'activate.log') self.log_file = None self.callback = None + self._ip_callback = None self.tests = [ - self._dhcp_port_toggle_test, - self._finalize + ('dhcp port_toggle test', self._dhcp_port_toggle_test), + ('dhcp multi subnet test', self._multi_subnet_test), + ('finalize', self._finalize) ] def start(self, port, params, callback, finish_hook): @@ -38,21 +40,34 @@ def start(self, port, params, callback, finish_hook): def _next_test(self): try: - self.tests.pop(0)() + name, func = self.tests.pop(0) + self.log('Running ' + name) + func() except Exception as e: + self.log(str(e)) self._finalize(exception=e) - def activate_log(self, message): + def log(self, message): """Log an activation message""" + LOGGER.info(message) self.log_file.write(message + '\n') def _dhcp_port_toggle_test(self): - self.activate_log('dhcp_port_toggle_test') if not self.host.connect_port(False): - self.activate_log('disconnect port not enabled') + self.log('disconnect port not enabled') return time.sleep(self.host.config.get("port_debounce_sec", 0) + 
1) self.host.connect_port(True) + self._ip_callback = self._next_test + + def _multi_subnet_test(self): + if not self.test_dhcp_ranges: + self._next_test() + return + dhcp_range = self.test_dhcp_ranges.pop(0) + self.log('Testing dhcp range: ' + ",".join([str(arg) for arg in dhcp_range])) + self.host.gateway.change_dhcp_range(*dhcp_range) + self._ip_callback = self._multi_subnet_test if self.test_dhcp_ranges else self._next_test def _finalize(self, exception=None): self.terminate() @@ -60,11 +75,12 @@ def _finalize(self, exception=None): def terminate(self): """Terminate this set of tests""" + self.log('Module terminating') self.log_file.close() self.log_file = None def ip_listener(self, target_ip): """Respond to a ip notification event""" - self.activate_log('ip notification %s' % target_ip) - LOGGER.info("%s received ip %s" % (self.test_name, target_ip)) - self._next_test() + self.log('ip notification %s' % target_ip) + if self._ip_callback: + self._ip_callback() diff --git a/docker/include/networking_scripts/change_dhcp_range b/docker/include/networking_scripts/change_dhcp_range new file mode 100755 index 0000000000..0c45cd6bae --- /dev/null +++ b/docker/include/networking_scripts/change_dhcp_range @@ -0,0 +1,24 @@ +#!/bin/bash -e +# +# Dynamically change DHCP lease range, requires killing and restarting +# dnsmasq as per documentation (SIGHUP does not reload configuration file). 
+LOCAL_IF=${LOCAL_IF:-$HOSTNAME-eth0} + +range_start=$1 +range_end=$2 +prefix_len=$3 +if [ -z $range_start -o -z $range_end -o -z $prefix_len ]; then + echo "Usage: change_dhcp_range range_start range_end prefix_len" + exit 1 +fi +while [ $(cat /etc/dnsmasq.conf | egrep "^dhcp-range=" | wc -l) == 0 ]; do + sleep 1 +done +ip addr add $range_start/$prefix_len dev $LOCAL_IF || true +original=$(cat /etc/dnsmasq.conf | egrep "^dhcp-range=" | head -1) +lease=$(echo $original | cut -d',' -f 3) +if [ -n "lease" ]; then + lease=",$lease" +fi +new="dhcp-range=$range_start,$range_end$lease" +flock /etc/dnsmasq.conf sed -i s/$original/$new/ /etc/dnsmasq.conf \ No newline at end of file diff --git a/docker/include/networking_scripts/change_lease_time b/docker/include/networking_scripts/change_lease_time index 306e985604..0cb8986c8a 100755 --- a/docker/include/networking_scripts/change_lease_time +++ b/docker/include/networking_scripts/change_lease_time @@ -7,10 +7,10 @@ if [ -z $lease ]; then echo "Lease time not defined." 
exit 1 fi -while [ $(cat /etc/dnsmasq.conf | grep dhcp-range=10.20 | wc -l) == 0 ]; do +while [ $(cat /etc/dnsmasq.conf | grep "^dhcp-range=" | wc -l) == 0 ]; do sleep 1 done -original=$(cat /etc/dnsmasq.conf | grep dhcp-range=10.20 | head -1) +original=$(cat /etc/dnsmasq.conf | grep "^dhcp-range=" | head -1) new="$(echo $original | cut -d',' -f 1,2),$lease" flock /etc/dnsmasq.conf sed -i s/$original/$new/ /etc/dnsmasq.conf diff --git a/docs/device_report.md b/docs/device_report.md index 3cc2ac6675..4a5be26839 100644 --- a/docs/device_report.md +++ b/docs/device_report.md @@ -144,17 +144,6 @@ RESULT pass base.target.ping target reached ``` -## Module ipaddr - - -#### Module Config - -|Attribute|Value| -|---|---| -|enabled|True| -|timeout_sec|300| -|port_flap_timeout_sec|20| - ## Module nmap diff --git a/resources/setups/baseline/module_config.json b/resources/setups/baseline/module_config.json index 83eaa9a3b5..144c249ce2 100644 --- a/resources/setups/baseline/module_config.json +++ b/resources/setups/baseline/module_config.json @@ -1,9 +1,10 @@ { "modules": { "ipaddr": { - "enabled": true, - "timeout_sec": 300, - "port_flap_timeout_sec": 20 + "enabled": false, + "timeout_sec": 900, + "port_flap_timeout_sec": 20, + "dhcp_ranges": [["192.168.0.1", "192.168.255.254", 16]] }, "pass": { "enabled": true diff --git a/testing/test_aux.out b/testing/test_aux.out index cf65e1a828..b46c806282 100644 --- a/testing/test_aux.out +++ b/testing/test_aux.out @@ -94,9 +94,16 @@ port-01 module_config modules "enabled": false }, "ipaddr": { - "enabled": true, + "dhcp_ranges": [ + [ + "192.168.0.1", + "192.168.255.254", + 16 + ] + ], + "enabled": false, "port_flap_timeout_sec": 20, - "timeout_sec": 300 + "timeout_sec": 900 }, "macoui": { "enabled": true @@ -147,9 +154,16 @@ port-02 module_config modules "enabled": true }, "ipaddr": { - "enabled": true, + "dhcp_ranges": [ + [ + "192.168.0.1", + "192.168.255.254", + 16 + ] + ], + "enabled": false, "port_flap_timeout_sec": 20, - 
"timeout_sec": 300 + "timeout_sec": 900 }, "macoui": { "enabled": true, @@ -206,19 +220,16 @@ inst/gw01/nodes/gw01/activate.log inst/gw02/nodes/gw02/activate.log inst/gw03/nodes/gw03/activate.log inst/run-port-01/nodes/fail01/activate.log -inst/run-port-01/nodes/ipaddr01/activate.log inst/run-port-01/nodes/nmap01/activate.log inst/run-port-01/nodes/pass01/activate.log inst/run-port-01/nodes/ping01/activate.log inst/run-port-02/nodes/fail02/activate.log inst/run-port-02/nodes/hold02/activate.log -inst/run-port-02/nodes/ipaddr02/activate.log inst/run-port-02/nodes/nmap02/activate.log inst/run-port-02/nodes/pass02/activate.log inst/run-port-02/nodes/ping02/activate.log inst/run-port-03/nodes/fail03/activate.log inst/run-port-03/nodes/hold03/activate.log -inst/run-port-03/nodes/ipaddr03/activate.log inst/run-port-03/nodes/nmap03/activate.log inst/run-port-03/nodes/pass03/activate.log inst/run-port-03/nodes/ping03/activate.log diff --git a/testing/test_aux.sh b/testing/test_aux.sh index f78d30f506..23861b04b1 100755 --- a/testing/test_aux.sh +++ b/testing/test_aux.sh @@ -121,7 +121,7 @@ more inst/run-port-*/scans/ip_triggers.txt | cat dhcp_done=$(fgrep done inst/run-port-01/scans/ip_triggers.txt | wc -l) dhcp_long=$(fgrep long inst/run-port-01/scans/ip_triggers.txt | wc -l) echo dhcp requests $((dhcp_done > 1)) $((dhcp_done < 3)) \ - $((dhcp_long > 1)) $((dhcp_long < 4)) | tee -a $TEST_RESULTS + $((dhcp_long >= 1)) $((dhcp_long < 4)) | tee -a $TEST_RESULTS sort inst/result.log | tee -a $TEST_RESULTS # Show partial logs from each test diff --git a/testing/test_base.out b/testing/test_base.out index d0181c88d0..7ad42c6cce 100644 --- a/testing/test_base.out +++ b/testing/test_base.out @@ -87,17 +87,6 @@ RESULT pass base.target.ping target reached ``` -## Module ipaddr - - -#### Module Config - -|Attribute|Value| -|---|---| -|enabled|True| -|timeout_sec|300| -|port_flap_timeout_sec|20| - ## Module nmap diff --git a/testing/test_many.out b/testing/test_many.out index 
3e8525aa5e..29cc0367df 100644 --- a/testing/test_many.out +++ b/testing/test_many.out @@ -5,6 +5,7 @@ Enough results: 1 Enough DHCP timeouts: 1 Enough static ips: 1 Enough ipaddr tests: 1 +Enough alternate subnet ips: 1 Enough ipaddr timeouts: 1 Redacted soak diff No soak report diff diff --git a/testing/test_many.sh b/testing/test_many.sh index 7befe66a76..c63c99a182 100755 --- a/testing/test_many.sh +++ b/testing/test_many.sh @@ -20,6 +20,7 @@ echo source config/system/default.yaml > local/system.conf echo monitor_scan_sec=5 >> local/system.conf echo switch_setup.uplink_port=$((NUM_DEVICES+1)) >> local/system.conf echo gcp_cred=$gcp_cred >> local/system.conf +echo dhcp_lease_time=120s >> local/system.conf for iface in $(seq 1 $NUM_DEVICES); do xdhcp="" @@ -86,6 +87,7 @@ results=$(fgrep [] inst/result.log | wc -l) timeouts=$(fgrep "ipaddr:TimeoutError" inst/result.log | wc -l) ipaddr_timeouts=$(fgrep "ipaddr:TimeoutError" inst/result.log | wc -l) ip_notifications=$(fgrep "ip notification" inst/run-port-*/nodes/ipaddr*/activate.log | wc -l) +alternate_subnet_ip=$(fgrep "ip notification 192.168" inst/run-port-*/nodes/ipaddr*/activate.log | wc -l) cat inst/run-port-*/scans/ip_triggers.txt static_ips=$(fgrep nope inst/run-port-*/scans/ip_triggers.txt | wc -l) @@ -103,7 +105,8 @@ echo Enough results: $((results >= 5*RUN_LIMIT/10)) | tee -a $TEST_RESULTS echo Enough DHCP timeouts: $((timeouts >= NUM_TIMEOUT_DEVICES)) | tee -a $TEST_RESULTS echo Enough static ips: $((static_ips >= (NUM_NO_DHCP_DEVICES - NUM_TIMEOUT_DEVICES))) | tee -a $TEST_RESULTS -echo Enough ipaddr tests: $((ip_notifications >= (NUM_IPADDR_TEST_DEVICES - NUM_IPADDR_TEST_TIMEOUT_DEVICES) )) | tee -a $TEST_RESULTS +echo Enough ipaddr tests: $((ip_notifications >= (NUM_IPADDR_TEST_DEVICES - NUM_IPADDR_TEST_TIMEOUT_DEVICES) * 2 )) | tee -a $TEST_RESULTS +echo Enough alternate subnet ips: $((alternate_subnet_ip >= (NUM_IPADDR_TEST_DEVICES - NUM_IPADDR_TEST_TIMEOUT_DEVICES) )) | tee -a $TEST_RESULTS echo 
Enough ipaddr timeouts: $((ipaddr_timeouts >= NUM_IPADDR_TEST_TIMEOUT_DEVICES)) | tee -a $TEST_RESULTS echo bin/combine_reports device=9a:02:57:1e:8f:05 from_time=$start_time to_time=$end_time count=2 From adb4c254e1fdfc1fc060321821766071bd1ca34e Mon Sep 17 00:00:00 2001 From: pbatta Date: Thu, 16 Jul 2020 23:55:41 -0700 Subject: [PATCH 28/38] Use multiple NTP requests and the monitor pcap capture, to reduce flakiness in NTP tests (#541) --- docker/include/bin/start_faux | 20 +++++++++++++------- docker/include/bin/test_ping | 4 ++-- testing/test_aux.sh | 2 +- 3 files changed, 16 insertions(+), 10 deletions(-) diff --git a/docker/include/bin/start_faux b/docker/include/bin/start_faux index 15c85bbf4e..bb80df1507 100755 --- a/docker/include/bin/start_faux +++ b/docker/include/bin/start_faux @@ -138,13 +138,19 @@ fi # Queries the NTP server learnt from DHCP. if [ -n "${options[ntpv4]}" ]; then - dhcp_ntp=$(fgrep NTPSERVERS= /run/ntpdate.dhcp) - ntp_server=`echo $dhcp_ntp | cut -d "'" -f 2` - echo Transmitting NTP query to $ntp_server using NTPv4 - ntpdate -q -o 4 $ntp_server & + (while date; do + dhcp_ntp=$(fgrep NTPSERVERS= /run/ntpdate.dhcp) + ntp_server=`echo $dhcp_ntp | cut -d "'" -f 2` + echo Transmitting NTP query to $ntp_server using NTPv4 + ntpdate -q -o 4 $ntp_server + sleep 5 + done) & elif [ -n "${options[ntpv3]}" ]; then - echo Transmitting NTP query to time.google.com using NTPv3 - ntpdate -q -o 3 time.google.com & + (while date; do + echo Transmitting NTP query to time.google.com using NTPv3 + ntpdate -q -o 3 time.google.com + sleep 5 + done) & fi # ntp_pass queries the NTP server learnt from DHCP. 
ntp_fail sends to time.google.com @@ -161,7 +167,7 @@ if [ -n "${options[ntp_pass]}" -o -n "${options[ntp_fail]}" ]; then fi echo Transmitting NTP query to $ntp_server ntpdate -q -p 1 $ntp_server - sleep 10 + sleep 5 done) & fi diff --git a/docker/include/bin/test_ping b/docker/include/bin/test_ping index 99bc957d83..b66393a079 100755 --- a/docker/include/bin/test_ping +++ b/docker/include/bin/test_ping @@ -88,8 +88,8 @@ echo Done with basic connectivity tests | tee -a $MONO_LOG echo Checking startup NTP ntp_target=${TARGET_IP%.*}.2 -ntp_request=`tcpdump -env -c 1 -r /scans/startup.pcap dst port 123 | wc -l` -ntp_proper=`tcpdump -env -c 1 -r /scans/startup.pcap dst port 123 and dst host $ntp_target | wc -l` +ntp_request=`tcpdump -env -c 1 -r /scans/monitor.pcap dst port 123 | wc -l` +ntp_proper=`tcpdump -env -c 1 -r /scans/monitor.pcap dst port 123 and dst host $ntp_target | wc -l` if [ "$ntp_request" == 0 ]; then ntp_result=skip ntp_summary="No NTP traffic detected" diff --git a/testing/test_aux.sh b/testing/test_aux.sh index 23861b04b1..edc08f149e 100755 --- a/testing/test_aux.sh +++ b/testing/test_aux.sh @@ -68,7 +68,7 @@ interfaces: faux-3: opts: tls macoui passwordpass bacnet pubber broadcast_client long_dhcp_response_sec: 0 -monitor_scan_sec: 0 +monitor_scan_sec: 20 EOF if [ -f "$gcp_cred" ]; then From b2bfed74c46a1ec377d40b4b876de789ef64b794 Mon Sep 17 00:00:00 2001 From: Trevor Date: Fri, 17 Jul 2020 18:22:24 -0700 Subject: [PATCH 29/38] Disable GCP combine check (#542) --- testing/test_many.gcp | 4 ---- testing/test_many.sh | 3 ++- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/testing/test_many.gcp b/testing/test_many.gcp index b24657de64..eccaaa37a2 100644 --- a/testing/test_many.gcp +++ b/testing/test_many.gcp @@ -1,6 +1,2 @@ Running testing/test_many.sh GCP results diff -5c5 -< Source: gcp ---- -> Source: local diff --git a/testing/test_many.sh b/testing/test_many.sh index c63c99a182..3dd796452b 100755 --- a/testing/test_many.sh +++ 
b/testing/test_many.sh @@ -126,7 +126,8 @@ if [ -f "$gcp_cred" ]; then bin/combine_reports device=9a:02:57:1e:8f:05 from_time=$start_time to_time=$end_time \ count=2 from_gcp=true echo GCP results diff | tee -a $GCP_RESULTS - diff inst/reports/combo_*.md out/report_local.md | tee -a $GCP_RESULTS + # TODO: Re-enable as per b/161529445 + # diff inst/reports/combo_*.md out/report_local.md | tee -a $GCP_RESULTS fi echo Done with many | tee -a $TEST_RESULTS From 98480217fc376c83aca0a5728b3b98a49e2597f7 Mon Sep 17 00:00:00 2001 From: henry54809 Date: Fri, 17 Jul 2020 18:39:19 -0700 Subject: [PATCH 30/38] Additional DHCP test part 3. Ip change test (#543) --- daq/host.py | 10 +--------- daq/ipaddr_test.py | 5 +++++ testing/test_dhcp.out | 5 ++++- testing/test_dhcp.sh | 40 ++++++++++++++++++++++++++++++++-------- 4 files changed, 42 insertions(+), 18 deletions(-) diff --git a/daq/host.py b/daq/host.py index 50d8fe224e..ee1e1a7e21 100644 --- a/daq/host.py +++ b/daq/host.py @@ -54,12 +54,6 @@ def pre_states(): """Return pre-test states for basic operation""" return ['startup', 'sanity', 'acquire', 'base', 'monitor'] - -def dhcp_tests(): - """Returns all supported dhcp tests""" - return ['port_toggle', 'multi_subnet', 'ip_change'] - - def post_states(): """Return post-test states for recording finalization""" return ['finish', 'info', 'timer'] @@ -282,7 +276,7 @@ def _start_run(self): self._startup_scan() def _mark_skipped_tests(self): - for test in self.config['test_list'] + dhcp_tests(): + for test in self.config['test_list']: if not self._test_enabled(test): self._record_result(test, state=MODE.NOPE) @@ -442,8 +436,6 @@ def ip_notify(self, target_ip, state=MODE.DONE, delta_sec=-1): with open(self._trigger_path, 'a') as output_stream: output_stream.write('%s %s %d\n' % (target_ip, state, delta_sec)) self._all_ips.append({"ip": target_ip, "timestamp": time.time()}) - if self._get_dhcp_mode() == "ip_change" and len(self._all_ips) == 1: - 
self.gateway.request_new_ip(self.target_mac) # Update ip directly if it's already triggered. if self.target_ip: self.target_ip = target_ip diff --git a/daq/ipaddr_test.py b/daq/ipaddr_test.py index b56948d9e8..451192f75d 100644 --- a/daq/ipaddr_test.py +++ b/daq/ipaddr_test.py @@ -28,6 +28,7 @@ def __init__(self, host, target_port, tmpdir, test_name, module_config): self.tests = [ ('dhcp port_toggle test', self._dhcp_port_toggle_test), ('dhcp multi subnet test', self._multi_subnet_test), + ('ip change test', self._ip_change_test), ('finalize', self._finalize) ] @@ -69,6 +70,10 @@ def _multi_subnet_test(self): self.host.gateway.change_dhcp_range(*dhcp_range) self._ip_callback = self._multi_subnet_test if self.test_dhcp_ranges else self._next_test + def _ip_change_test(self): + self.host.gateway.request_new_ip(self.host.target_mac) + self._ip_callback = self._next_test + def _finalize(self, exception=None): self.terminate() self.callback(exception=exception) diff --git a/testing/test_dhcp.out b/testing/test_dhcp.out index e7bf981faa..31148ca8b6 100644 --- a/testing/test_dhcp.out +++ b/testing/test_dhcp.out @@ -4,9 +4,12 @@ DHCP Tests 02: ['02:acquire:TimeoutError'] 03: [] 04: [] +05: [] Device 1 ip triggers: 1 0 Device 2 ip triggers: 0 0 Device 3 long ip triggers: 1 Device 4 ip triggers: 1 -Number of ips: 2 +Device 4 subnet 1 ip: 1 subnet 2 ip: 1 subnet 3 ip: 2 +Device 5 ip triggers: 1 +Device 5 num of ips: 2 Done with tests diff --git a/testing/test_dhcp.sh b/testing/test_dhcp.sh index c979de2a98..791b091669 100755 --- a/testing/test_dhcp.sh +++ b/testing/test_dhcp.sh @@ -7,11 +7,12 @@ echo DHCP Tests >> $TEST_RESULTS cat < local/system.conf source config/system/default.yaml site_description="Multi-Device Configuration" -switch_setup.uplink_port=5 +switch_setup.uplink_port=6 interfaces.faux-1.opts= interfaces.faux-2.opts=xdhcp interfaces.faux-3.opts= interfaces.faux-4.opts= +interfaces.faux-5.opts= monitor_scan_sec=1 EOF @@ -29,14 +30,31 @@ cat < 
local/site/mac_addrs/$intf_mac/module_config.json } EOF +# Multi subnet multi subnet tests intf_mac="9a02571e8f04" mkdir -p local/site/mac_addrs/$intf_mac cat < local/site/mac_addrs/$intf_mac/module_config.json { "modules": { "ipaddr": { - "timeout_sec": 320, - "dhcp_mode": "ip_change" + "enabled": true, + "port_flap_timeout_sec": 20, + "dhcp_ranges": [["192.168.0.1", "192.168.255.254", 16], ["10.255.255.1", "10.255.255.255", 24], ["172.16.0.1", "172.16.0.200", 24]] + } + } +} +EOF + +# ip change test +intf_mac="9a02571e8f05" +mkdir -p local/site/mac_addrs/$intf_mac +cat < local/site/mac_addrs/$intf_mac/module_config.json +{ + "modules": { + "ipaddr": { + "enabled": true, + "port_flap_timeout_sec": 20, + "dhcp_ranges": [] } } } @@ -47,7 +65,7 @@ cmd/run -b -s settle_sec=0 dhcp_lease_time=120s cat inst/result.log | sort | tee -a $TEST_RESULTS -for iface in $(seq 1 4); do +for iface in $(seq 1 5); do intf_mac=9a:02:57:1e:8f:0$iface ip_file=inst/run-port-0$iface/scans/ip_triggers.txt cat $ip_file @@ -55,11 +73,17 @@ for iface in $(seq 1 4); do long_triggers=$(fgrep long $ip_file | wc -l) num_ips=$(cat $ip_file | cut -d ' ' -f 1 | sort | uniq | wc -l) echo Found $ip_triggers ip triggers and $long_triggers long ip responses. 
- if [ $iface == 4 ]; then - echo "Device $iface ip triggers: $(((ip_triggers + long_triggers) >= 2))" | tee -a $TEST_RESULTS - echo "Number of ips: $num_ips" | tee -a $TEST_RESULTS + if [ $iface == 5 ]; then + echo "Device $iface ip triggers: $(((ip_triggers + long_triggers) >= 3))" | tee -a $TEST_RESULTS + echo "Device $iface num of ips: $num_ips" | tee -a $TEST_RESULTS + elif [ $iface == 4 ]; then + echo "Device $iface ip triggers: $(((ip_triggers + long_triggers) >= 4))" | tee -a $TEST_RESULTS + subnet_ip=$(fgrep "ip notification 192.168" inst/run-port-*/nodes/ipaddr*/activate.log | wc -l) + subnet2_ip=$(fgrep "ip notification 10.255.255" inst/run-port-*/nodes/ipaddr*/activate.log | wc -l) + subnet3_ip=$(fgrep "ip notification 172.16.0" inst/run-port-*/nodes/ipaddr*/activate.log | wc -l) + echo "Device $iface subnet 1 ip: $subnet_ip subnet 2 ip: $subnet2_ip subnet 3 ip: $subnet3_ip" | tee -a $TEST_RESULTS elif [ $iface == 3 ]; then - echo "Device $iface long ip triggers: $((long_triggers > 0))" | tee -a $TEST_RESULTS + echo "Device $iface long ip triggers: $((long_triggers > 0))" | tee -a $TEST_RESULTS else echo "Device $iface ip triggers: $((ip_triggers > 0)) $((long_triggers > 0))" | tee -a $TEST_RESULTS fi From 198a130d400102c4344c3b29dfc827097194217c Mon Sep 17 00:00:00 2001 From: Nour Date: Sun, 19 Jul 2020 11:52:40 +0100 Subject: [PATCH 31/38] Add sshv1 to faux, update tests in tests_aux to check --- docker/include/bin/start_faux | 7 ++-- docker/modules/Dockerfile.faux1 | 52 +++++++++++++++++++++++----- subset/security/ssh_additions.config | 5 --- testing/test_aux.out | 4 +-- testing/test_aux.sh | 2 +- 5 files changed, 52 insertions(+), 18 deletions(-) delete mode 100644 subset/security/ssh_additions.config diff --git a/docker/include/bin/start_faux b/docker/include/bin/start_faux index 63a3a95792..351213d8ea 100755 --- a/docker/include/bin/start_faux +++ b/docker/include/bin/start_faux @@ -126,10 +126,13 @@ if [ -n "${options[telnet]}" ]; then (while 
true; do echo Telnet `hostname`; nc -nvlt -p 23 -e `which hostname`; done) & fi -# SSH server is installed but not running if [ -n "${options[ssh]}" ]; then echo Starting SSH server - service ssh start + /usr/local/sbin/sshd +elif [ -n "${options[sshv1]}" ]; then + echo Starting SSHv1 server + echo 'Protocol 1' >> /usr/local/etc/sshd_config + /usr/local/sbin/sshd fi if [ -n "${options[bacnet]}" ]; then diff --git a/docker/modules/Dockerfile.faux1 b/docker/modules/Dockerfile.faux1 index 7c8e9c0be0..01e3364b40 100644 --- a/docker/modules/Dockerfile.faux1 +++ b/docker/modules/Dockerfile.faux1 @@ -18,13 +18,54 @@ RUN bin/retry_cmd git clone https://github.com/grafnu/bacnet4j.git --single-bran COPY pubber/ pubber/ RUN pubber/bin/build +# Dockerfile to build older version of SSH and SSL +FROM daqf/aardvark:latest as ssh_build + +RUN $AG update && $AG install wget make build-essential gcc libz-dev ca-certificates + +# Build SSH, OpenSSL from source and configure +RUN wget https://www.openssl.org/source/openssl-1.0.2g.tar.gz && \ + tar -xzf openssl-1.0.2g.tar.gz && \ + cd openssl-1.0.2g && \ + ./config --prefix=/usr/local/openssl --openssldir=/usr/local/openssl && \ + make && \ + make install && \ + cd .. && \ + mkdir /etc/ssh && \ + mkdir /var/empty && \ + chown root:sys /var/empty && \ + chmod 755 /var/empty && \ + groupadd sshd && \ + useradd -g sshd -c 'sshd privsep' -d /var/empty -s /bin/false sshd && \ + wget https://mirrors.mit.edu/pub/OpenBSD/OpenSSH/portable/openssh-7.2p1.tar.gz && \ + tar -xzf openssh-7.2p1.tar.gz && \ + cd openssh-7.2p1 && \ + ./configure --with-ssl-dir=/usr/local/openssl --with-ssh1 && \ + make && \ + make install + +#This will install the OpenSSH binaries in /usr/local/bin, configuration files +#in /usr/local/etc, the server in /usr/local/sbin, etc. 
To specify a different +#installation prefix, use the --prefix option to configure: + FROM daqf/aardvark:latest +COPY --from=ssh_build /usr/local/openssl/* /usr/local/openssl/ +COPY --from=ssh_build /usr/local/sbin/* /usr/local/sbin/ +COPY --from=ssh_build /usr/local/bin/* /usr/local/bin/ +COPY --from=ssh_build /usr/local/etc/* /usr/local/etc/ + +RUN mkdir /var/empty && \ + chown root:sys /var/empty && \ + chmod 755 /var/empty && \ + groupadd sshd && \ + useradd -g sshd -c 'sshd privsep' -d /var/empty -s /bin/false sshd + # Run this separately so it can be shared with other builds. RUN $AG update && $AG install openjdk-8-jre RUN $AG update && $AG install openjdk-8-jdk git RUN $AG update && $AG install isc-dhcp-client ethtool network-manager netcat curl\ - python ifupdown openssl ssh nano apache2-utils ntpdate + python ifupdown openssl nano apache2-utils ntpdate # Additional OS dependencies RUN $AG update && $AG install -y telnetd && $AG install xinetd nginx @@ -36,6 +77,8 @@ COPY subset/network/TransportClient TransportClient # Prefetch resolvconf to dynamically install at runtime in start_faux. RUN $AG update && cd /tmp && ln -s ~/bin bin && $AG download resolvconf && mv resolvconf_*.deb ~ +RUN /usr/local/bin/ssh-keygen -A + COPY docker/include/bin/start_faux docker/include/bin/failing bin/ COPY --from=java_build /root/bacnet4j/*.jar bacnet4j/ @@ -52,18 +95,11 @@ COPY subset/bacnet/bacnetTests/src/main/resources/Faux*.json tmp/ COPY --from=java_build /root/bacnet4j/bacnet4j-1.0-SNAPSHOT-all.jar bacnetTests/libs/ RUN cd bacnetTests && ./gradlew build -# SSH dependency -COPY subset/security/ssh_additions.config ssh_additions.config -RUN cat ssh_additions.config >> /etc/ssh/sshd_config - # HTTP/HTTPS dependency COPY subset/security/nginxpass.conf /root/nginx/ COPY subset/security/nginxfail.conf /root/nginx/ COPY subset/security/nginx-site /var/www/nginx-site -# SSH login fix. 
Otherwise user is kicked off after login -RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd - # Weird workaround for problem running tcdump in a privlidged container. RUN mv /usr/sbin/tcpdump /usr/bin/tcpdump diff --git a/subset/security/ssh_additions.config b/subset/security/ssh_additions.config deleted file mode 100644 index 7e8895f7c9..0000000000 --- a/subset/security/ssh_additions.config +++ /dev/null @@ -1,5 +0,0 @@ -Port 22 -ListenAddress 0.0.0.0 -PermitRootLogin yes -PasswordAuthentication yes -KexAlgorithms diffie-hellman-group1-sha1,diffie-hellman-group-exchange-sha1 diff --git a/testing/test_aux.out b/testing/test_aux.out index 98d51c0919..e80a78782c 100644 --- a/testing/test_aux.out +++ b/testing/test_aux.out @@ -58,8 +58,8 @@ RESULT pass security.passwords.ssh Default passwords have been changed. RESULT skip security.firmware Could not retrieve a firmware version with nmap. Check bacnet port. RESULT pass security.firmware version found: ?\xFF\xFF\x19,>u\x08\x00no RESULT pass security.ssh.version Device only supports SSHv2 -RESULT pass security.ssh.version Device only supports SSHv2 -RESULT pass security.ssh.version Device only supports SSHv2 +RESULT fail security.ssh.version Device supports SSHv1 +RESULT skip security.ssh.version Device is not running an SSH server dhcp requests 1 1 0 1 01: [] 02: ['02:macoui:TimeoutError', '02:ping:TimeoutError'] diff --git a/testing/test_aux.sh b/testing/test_aux.sh index 0c0a384fa0..83fc440b75 100755 --- a/testing/test_aux.sh +++ b/testing/test_aux.sh @@ -64,7 +64,7 @@ interfaces: faux-1: opts: brute broadcast_client ntp_pass ssh faux-2: - opts: nobrute expiredtls bacnetfail pubber passwordfail ntp_fail opendns + opts: nobrute expiredtls bacnetfail pubber passwordfail ntp_fail opendns sshv1 faux-3: opts: tls macoui passwordpass bacnet pubber broadcast_client long_dhcp_response_sec: 0 From ad1c1416387915eb9ad8ade49455ca007242fa78 Mon Sep 17 00:00:00 2001 From: Nour 
Date: Sun, 19 Jul 2020 15:48:45 +0100 Subject: [PATCH 32/38] move test from test_aux to test_modules, add blank line eof --- docs/device_report.md | 73 +++++++++++++------------------------- subset/security/build.conf | 2 +- testing/test_aux.out | 44 ++++++++++++++++------- testing/test_aux.sh | 12 +++---- testing/test_modules.out | 6 ++++ testing/test_modules.sh | 3 ++ 6 files changed, 71 insertions(+), 69 deletions(-) diff --git a/docs/device_report.md b/docs/device_report.md index ea6c43a5ca..4a5be26839 100644 --- a/docs/device_report.md +++ b/docs/device_report.md @@ -56,7 +56,7 @@ Overall device result FAIL |---|---|---|---|---| |Required|1|0|0|0| |Recommended|2|0|0|0| -|Other|3|2|21|2| +|Other|2|2|22|2| |Result|Test|Category|Expectation|Notes| |---|---|---|---|---| @@ -67,6 +67,7 @@ Overall device result FAIL |skip|cloud.udmi.state|Other|Other|No device id| |skip|cloud.udmi.system|Other|Other|No device id| |fail|connection.mac_oui|Other|Other|Manufacturer prefix not found!| +|pass|connection.network.ntp_support|Other|Other|Using NTPv4.| |skip|connection.port_duplex|Other|Other|No local IP has been set, check system config| |skip|connection.port_link|Other|Other|No local IP has been set, check system config| |skip|connection.port_speed|Other|Other|No local IP has been set, check system config| @@ -79,10 +80,9 @@ Overall device result FAIL |skip|security.firmware|Other|Other|Could not retrieve a firmware version with nmap. 
Check bacnet port.| |skip|security.passwords.http|Other|Other|Port 80 is not open on target device.| |skip|security.passwords.https|Other|Other|Port 443 is not open on target device.| -|pass|security.passwords.ssh|Other|Other|Default passwords have been changed.| +|skip|security.passwords.ssh|Other|Other|Port 22 is not open on target device.| |skip|security.passwords.telnet|Other|Other|Port 23 is not open on target device.| |pass|security.ports.nmap|Security|Recommended|Only allowed ports found open.| -|pass|security.ssh.version|Other|Other|Device only supports SSHv2| |skip|security.tls.v1|Other|Other|IOException unable to connect to server| |skip|security.tls.v1.x509|Other|Other|IOException unable to connect to server| |skip|security.tls.v1_2|Other|Other|IOException unable to connect to server| @@ -93,15 +93,6 @@ Overall device result FAIL |gone|unknown.fake.monkey|Other|Other|| -## Module ipaddr - - -#### Module Config - -|Attribute|Value| -|---|---| -|timeout_sec|300| - ## Module pass @@ -421,7 +412,6 @@ Nmap scan report for daq-faux-1 (X.X.X.X) Host is up (XXX). PORT STATE SERVICE -22/tcp open ssh 10000/tcp open snet-sensor-mgmt MAC Address: 9A:02:57:1E:8F:01 (Unknown) @@ -444,7 +434,6 @@ Nmap scan report for daq-faux-1 (X.X.X.X) Host is up (XXX). PORT STATE SERVICE -22/tcp open ssh 10000/tcp open snet-sensor-mgmt MAC Address: 9A:02:57:1E:8F:01 (Unknown) @@ -467,7 +456,6 @@ Nmap scan report for daq-faux-1 (X.X.X.X) Host is up (XXX). PORT STATE SERVICE -22/tcp open ssh 10000/tcp open snet-sensor-mgmt MAC Address: 9A:02:57:1E:8F:01 (Unknown) @@ -490,47 +478,14 @@ Nmap scan report for daq-faux-1 (X.X.X.X) Host is up (XXX). PORT STATE SERVICE -22/tcp open ssh 10000/tcp open snet-sensor-mgmt MAC Address: 9A:02:57:1E:8F:01 (Unknown) Nmap done: 1 IP address (1 host up) scanned in XXX nmap X.X.X.X -Starting brute force... 
-hydra -L /tmp/ssh_usernames.txt -P /tmp/ssh_passwords.txt X.X.X.X ssh -s 22 -Hydra v8.6 (c) 2017 by van Hauser/THC - Please do not use in military or secret service organizations, or for illegal purposes. - -Hydra (http://www.thc.org/thc-hydra) starting at XXX -[DATA] max 6 tasks per 1 server, overall 6 tasks, 6 login tries (l:2/p:3), ~1 try per task -[DATA] attacking ssh://X.X.X.X:22/ -1 of 1 target completed, 0 valid passwords found -Hydra (http://www.thc.org/thc-hydra) finished at XXX Done. -------------------- -RESULT pass security.passwords.ssh Default passwords have been changed. - -``` - -#### Module Config - -|Attribute|Value| -|---|---| -|enabled|True| - -## Module ssh - - -#### Report - -``` --------------------- -security.ssh.version --------------------- -Check that device only support SSHv2 --------------------- -22/tcp open ssh OpenSSH 7.6p1 Ubuntu 4ubuntu0.3 (Ubuntu Linux; protocol 2.0) --------------------- -RESULT pass security.ssh.version Device only supports SSHv2 +RESULT skip security.passwords.ssh Port 22 is not open on target device. ``` @@ -604,5 +559,25 @@ RESULT pass manual.test.travis Manual test - for testing |---|---| |enabled|True| +## Module ntp + + +#### Report + +``` +-------------------- +connection.network.ntp_support +-------------------- +Device supports NTP version 4. +-------------------- +RESULT pass connection.network.ntp_support Using NTPv4. 
+``` + +#### Module Config + +|Attribute|Value| +|---|---| +|enabled|True| + ## Report complete diff --git a/subset/security/build.conf b/subset/security/build.conf index 5b7e9ed28d..26876f4343 100644 --- a/subset/security/build.conf +++ b/subset/security/build.conf @@ -1,4 +1,4 @@ build subset/security add tls add password -add ssh \ No newline at end of file +add ssh diff --git a/testing/test_aux.out b/testing/test_aux.out index e80a78782c..b46c806282 100644 --- a/testing/test_aux.out +++ b/testing/test_aux.out @@ -46,7 +46,7 @@ RESULT pass security.tls.v1_3.x509 Certificate active for current date and publi RESULT skip security.passwords.http Port 80 is not open on target device. RESULT skip security.passwords.https Port 443 is not open on target device. RESULT skip security.passwords.telnet Port 23 is not open on target device. -RESULT pass security.passwords.ssh Default passwords have been changed. +RESULT skip security.passwords.ssh Port 22 is not open on target device. RESULT fail security.passwords.http Default passwords have not been changed. RESULT fail security.passwords.https Default passwords have not been changed. RESULT fail security.passwords.telnet Default passwords have not been changed. @@ -57,10 +57,10 @@ RESULT pass security.passwords.telnet Default passwords have been changed. RESULT pass security.passwords.ssh Default passwords have been changed. RESULT skip security.firmware Could not retrieve a firmware version with nmap. Check bacnet port. RESULT pass security.firmware version found: ?\xFF\xFF\x19,>u\x08\x00no -RESULT pass security.ssh.version Device only supports SSHv2 -RESULT fail security.ssh.version Device supports SSHv1 -RESULT skip security.ssh.version Device is not running an SSH server -dhcp requests 1 1 0 1 +RESULT pass connection.network.ntp_support Using NTPv4. +RESULT fail connection.network.ntp_support Not using NTPv4. +RESULT skip connection.network.ntp_support No NTP packets received. 
+dhcp requests 1 1 1 1 01: [] 02: ['02:macoui:TimeoutError', '02:ping:TimeoutError'] 03: [] @@ -94,7 +94,16 @@ port-01 module_config modules "enabled": false }, "ipaddr": { - "timeout_sec": 300 + "dhcp_ranges": [ + [ + "192.168.0.1", + "192.168.255.254", + 16 + ] + ], + "enabled": false, + "port_flap_timeout_sec": 20, + "timeout_sec": 900 }, "macoui": { "enabled": true @@ -108,13 +117,13 @@ port-01 module_config modules "nmap": { "enabled": true }, - "pass": { + "ntp": { "enabled": true }, - "password": { + "pass": { "enabled": true }, - "ssh": { + "password": { "enabled": true }, "switch": { @@ -145,7 +154,16 @@ port-02 module_config modules "enabled": true }, "ipaddr": { - "timeout_sec": 300 + "dhcp_ranges": [ + [ + "192.168.0.1", + "192.168.255.254", + 16 + ] + ], + "enabled": false, + "port_flap_timeout_sec": 20, + "timeout_sec": 900 }, "macoui": { "enabled": true, @@ -160,15 +178,15 @@ port-02 module_config modules "nmap": { "enabled": true }, + "ntp": { + "enabled": true + }, "pass": { "enabled": false }, "password": { "enabled": true }, - "ssh": { - "enabled": true - }, "switch": { "enabled": true }, diff --git a/testing/test_aux.sh b/testing/test_aux.sh index 83fc440b75..0aaa52f11a 100755 --- a/testing/test_aux.sh +++ b/testing/test_aux.sh @@ -62,13 +62,13 @@ site_path: inst/test_site schema_path: schemas/udmi interfaces: faux-1: - opts: brute broadcast_client ntp_pass ssh + opts: brute broadcast_client ntpv4 ssh faux-2: - opts: nobrute expiredtls bacnetfail pubber passwordfail ntp_fail opendns sshv1 + opts: nobrute expiredtls bacnetfail pubber passwordfail ntpv3 opendns ssh faux-3: - opts: tls macoui passwordpass bacnet pubber broadcast_client + opts: tls macoui passwordpass bacnet pubber broadcast_client ssh long_dhcp_response_sec: 0 -monitor_scan_sec: 0 +monitor_scan_sec: 20 EOF if [ -f "$gcp_cred" ]; then @@ -114,14 +114,14 @@ capture_test_results tls capture_test_results password capture_test_results discover capture_test_results network 
-capture_test_results ssh +capture_test_results ntp # Capture peripheral logs more inst/run-port-*/scans/ip_triggers.txt | cat dhcp_done=$(fgrep done inst/run-port-01/scans/ip_triggers.txt | wc -l) dhcp_long=$(fgrep long inst/run-port-01/scans/ip_triggers.txt | wc -l) echo dhcp requests $((dhcp_done > 1)) $((dhcp_done < 3)) \ - $((dhcp_long > 1)) $((dhcp_long < 4)) | tee -a $TEST_RESULTS + $((dhcp_long >= 1)) $((dhcp_long < 4)) | tee -a $TEST_RESULTS sort inst/result.log | tee -a $TEST_RESULTS # Show partial logs from each test diff --git a/testing/test_modules.out b/testing/test_modules.out index ff067a6826..f0c130ddd3 100644 --- a/testing/test_modules.out +++ b/testing/test_modules.out @@ -31,4 +31,10 @@ Testing nmap bacnet RESULT pass security.ports.nmap Only allowed ports found open. Testing nmap telnet RESULT fail security.ports.nmap Some disallowed ports are open: 23 +Testing ssh +RESULT skip security.ssh.version Device is not running an SSH server +Testing ssh ssh +RESULT pass security.ssh.version Device only supports SSHv2 +Testing ssh sshv1 +RESULT fail security.ssh.version Device supports SSHv1 Testing complete. 
diff --git a/testing/test_modules.sh b/testing/test_modules.sh index bc123230e1..f703b75c77 100755 --- a/testing/test_modules.sh +++ b/testing/test_modules.sh @@ -17,6 +17,9 @@ tls alt expiredtls nmap nmap bacnet nmap telnet +ssh +ssh ssh +ssh sshv1 EOF DAQ_TARGETS=aardvark,faux1,faux2 bin/docker_build force inline From 15410e9b43dc70a1f1a1dcd6f7e2e8f8e2bf587e Mon Sep 17 00:00:00 2001 From: Nour Date: Sun, 19 Jul 2020 15:59:55 +0100 Subject: [PATCH 33/38] remove ssh from test_aux --- resources/test_site/module_config.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resources/test_site/module_config.json b/resources/test_site/module_config.json index 8b7c157ed4..3ff4030a4c 100644 --- a/resources/test_site/module_config.json +++ b/resources/test_site/module_config.json @@ -18,7 +18,7 @@ "manual": { "enabled": true }, - "ssh": { + "ntp": { "enabled": true } }, From d5b97edc115e6d5b959ff1d0afbfbca6d64e13d6 Mon Sep 17 00:00:00 2001 From: Nour Date: Sun, 19 Jul 2020 16:38:12 +0100 Subject: [PATCH 34/38] make 'make' silent in build, disable ssh in test site --- docker/modules/Dockerfile.faux1 | 12 ++++-------- resources/test_site/module_config.json | 3 +++ 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/docker/modules/Dockerfile.faux1 b/docker/modules/Dockerfile.faux1 index 01e3364b40..61f75de306 100644 --- a/docker/modules/Dockerfile.faux1 +++ b/docker/modules/Dockerfile.faux1 @@ -28,8 +28,8 @@ RUN wget https://www.openssl.org/source/openssl-1.0.2g.tar.gz && \ tar -xzf openssl-1.0.2g.tar.gz && \ cd openssl-1.0.2g && \ ./config --prefix=/usr/local/openssl --openssldir=/usr/local/openssl && \ - make && \ - make install && \ + make -s && \ + make -s install && \ cd .. 
&& \ mkdir /etc/ssh && \ mkdir /var/empty && \ @@ -41,12 +41,8 @@ RUN wget https://www.openssl.org/source/openssl-1.0.2g.tar.gz && \ tar -xzf openssh-7.2p1.tar.gz && \ cd openssh-7.2p1 && \ ./configure --with-ssl-dir=/usr/local/openssl --with-ssh1 && \ - make && \ - make install - -#This will install the OpenSSH binaries in /usr/local/bin, configuration files -#in /usr/local/etc, the server in /usr/local/sbin, etc. To specify a different -#installation prefix, use the --prefix option to configure: + make -s && \ + make -s install FROM daqf/aardvark:latest diff --git a/resources/test_site/module_config.json b/resources/test_site/module_config.json index 3ff4030a4c..e1f4050758 100644 --- a/resources/test_site/module_config.json +++ b/resources/test_site/module_config.json @@ -18,6 +18,9 @@ "manual": { "enabled": true }, + "ssh": { + "enabled": false + }, "ntp": { "enabled": true } From 4cbd034128f9549ebbab4b80a5618a55e323896c Mon Sep 17 00:00:00 2001 From: Nour Date: Sun, 19 Jul 2020 17:14:45 +0100 Subject: [PATCH 35/38] fix remove ssh, prevent existing password test from changing --- resources/test_site/module_config.json | 3 +++ testing/test_aux.sh | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/resources/test_site/module_config.json b/resources/test_site/module_config.json index 3ff4030a4c..ac17f224b1 100644 --- a/resources/test_site/module_config.json +++ b/resources/test_site/module_config.json @@ -20,6 +20,9 @@ }, "ntp": { "enabled": true + }, + "ssh": { + "enabled": false } }, "process": { diff --git a/testing/test_aux.sh b/testing/test_aux.sh index f450ec4605..0aaa52f11a 100755 --- a/testing/test_aux.sh +++ b/testing/test_aux.sh @@ -62,9 +62,9 @@ site_path: inst/test_site schema_path: schemas/udmi interfaces: faux-1: - opts: brute broadcast_client ntpv4 + opts: brute broadcast_client ntpv4 ssh faux-2: - opts: nobrute expiredtls bacnetfail pubber passwordfail ntpv3 opendns + opts: nobrute expiredtls bacnetfail pubber passwordfail ntpv3 
opendns ssh faux-3: opts: tls macoui passwordpass bacnet pubber broadcast_client ssh long_dhcp_response_sec: 0 From 2d9cd2b3125f009bfbacfce30db0c4e8e1db2222 Mon Sep 17 00:00:00 2001 From: Nour Date: Sun, 19 Jul 2020 19:12:54 +0100 Subject: [PATCH 36/38] fix test_aux --- testing/test_aux.out | 6 ++++++ testing/test_aux.sh | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/testing/test_aux.out b/testing/test_aux.out index b46c806282..75933922ee 100644 --- a/testing/test_aux.out +++ b/testing/test_aux.out @@ -126,6 +126,9 @@ port-01 module_config modules "password": { "enabled": true }, + "ssh": { + "enabled": false + }, "switch": { "enabled": true, "poe": { @@ -187,6 +190,9 @@ port-02 module_config modules "password": { "enabled": true }, + "ssh": { + "enabled": false + }, "switch": { "enabled": true }, diff --git a/testing/test_aux.sh b/testing/test_aux.sh index 0aaa52f11a..cc97d80a22 100755 --- a/testing/test_aux.sh +++ b/testing/test_aux.sh @@ -62,7 +62,7 @@ site_path: inst/test_site schema_path: schemas/udmi interfaces: faux-1: - opts: brute broadcast_client ntpv4 ssh + opts: brute broadcast_client ntpv4 faux-2: opts: nobrute expiredtls bacnetfail pubber passwordfail ntpv3 opendns ssh faux-3: From 6546e107067c867ed405f90eb2b7687eb91e2720 Mon Sep 17 00:00:00 2001 From: Nour Date: Mon, 20 Jul 2020 10:42:01 +0100 Subject: [PATCH 37/38] Move build scripts into seperate files --- docker/modules/Dockerfile.faux1 | 46 ++++++++------------------ subset/security/sshfaux/ssh_build.sh | 28 ++++++++++++++++ subset/security/sshfaux/ssh_privsep.sh | 11 ++++++ 3 files changed, 52 insertions(+), 33 deletions(-) create mode 100644 subset/security/sshfaux/ssh_build.sh create mode 100644 subset/security/sshfaux/ssh_privsep.sh diff --git a/docker/modules/Dockerfile.faux1 b/docker/modules/Dockerfile.faux1 index 61f75de306..69f7c0edef 100644 --- a/docker/modules/Dockerfile.faux1 +++ b/docker/modules/Dockerfile.faux1 @@ -18,45 +18,18 @@ RUN bin/retry_cmd git clone 
https://github.com/grafnu/bacnet4j.git --single-bran COPY pubber/ pubber/ RUN pubber/bin/build -# Dockerfile to build older version of SSH and SSL +# Seperate stage to build older version of SSH and SSL FROM daqf/aardvark:latest as ssh_build RUN $AG update && $AG install wget make build-essential gcc libz-dev ca-certificates # Build SSH, OpenSSL from source and configure -RUN wget https://www.openssl.org/source/openssl-1.0.2g.tar.gz && \ - tar -xzf openssl-1.0.2g.tar.gz && \ - cd openssl-1.0.2g && \ - ./config --prefix=/usr/local/openssl --openssldir=/usr/local/openssl && \ - make -s && \ - make -s install && \ - cd .. && \ - mkdir /etc/ssh && \ - mkdir /var/empty && \ - chown root:sys /var/empty && \ - chmod 755 /var/empty && \ - groupadd sshd && \ - useradd -g sshd -c 'sshd privsep' -d /var/empty -s /bin/false sshd && \ - wget https://mirrors.mit.edu/pub/OpenBSD/OpenSSH/portable/openssh-7.2p1.tar.gz && \ - tar -xzf openssh-7.2p1.tar.gz && \ - cd openssh-7.2p1 && \ - ./configure --with-ssl-dir=/usr/local/openssl --with-ssh1 && \ - make -s && \ - make -s install +COPY subset/security/sshfaux/*.sh ./ +RUN sh ssh_build.sh + FROM daqf/aardvark:latest -COPY --from=ssh_build /usr/local/openssl/* /usr/local/openssl/ -COPY --from=ssh_build /usr/local/sbin/* /usr/local/sbin/ -COPY --from=ssh_build /usr/local/bin/* /usr/local/bin/ -COPY --from=ssh_build /usr/local/etc/* /usr/local/etc/ - -RUN mkdir /var/empty && \ - chown root:sys /var/empty && \ - chmod 755 /var/empty && \ - groupadd sshd && \ - useradd -g sshd -c 'sshd privsep' -d /var/empty -s /bin/false sshd - # Run this separately so it can be shared with other builds. RUN $AG update && $AG install openjdk-8-jre RUN $AG update && $AG install openjdk-8-jdk git @@ -73,8 +46,6 @@ COPY subset/network/TransportClient TransportClient # Prefetch resolvconf to dynamically install at runtime in start_faux. 
RUN $AG update && cd /tmp && ln -s ~/bin bin && $AG download resolvconf && mv resolvconf_*.deb ~ -RUN /usr/local/bin/ssh-keygen -A - COPY docker/include/bin/start_faux docker/include/bin/failing bin/ COPY --from=java_build /root/bacnet4j/*.jar bacnet4j/ @@ -96,6 +67,15 @@ COPY subset/security/nginxpass.conf /root/nginx/ COPY subset/security/nginxfail.conf /root/nginx/ COPY subset/security/nginx-site /var/www/nginx-site +COPY --from=ssh_build /usr/local/openssl/* /usr/local/openssl/ +COPY --from=ssh_build /usr/local/sbin/* /usr/local/sbin/ +COPY --from=ssh_build /usr/local/bin/* /usr/local/bin/ +COPY --from=ssh_build /usr/local/etc/* /usr/local/etc/ + +COPY subset/security/sshfaux/ssh_privsep.sh ssh_privsep.sh +RUN sh ssh_privsep.sh +RUN /usr/local/bin/ssh-keygen -A + # Weird workaround for problem running tcdump in a privlidged container. RUN mv /usr/sbin/tcpdump /usr/bin/tcpdump diff --git a/subset/security/sshfaux/ssh_build.sh b/subset/security/sshfaux/ssh_build.sh new file mode 100644 index 0000000000..f870555f1d --- /dev/null +++ b/subset/security/sshfaux/ssh_build.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# +# Build older versions OpenSSL 1.0.2 and OpenSSH 7.2 +# Used for testing in faux devices only +# +# To run SSHD use /usr/local/sbin/sshd +# SSH components, e.g. ssh-keygen are found in /usr/local/bin +# SSH configuration and keys found in /usr/local/etc + +# Build OpenSSL 1.0.2 +wget https://www.openssl.org/source/openssl-1.0.2g.tar.gz +tar -xzf openssl-1.0.2g.tar.gz +cd openssl-1.0.2g +./config --prefix=/usr/local/openssl --openssldir=/usr/local/openssl +make -s +make -s install +cd .. 
+ +# Prepare privellage seperation for SSHD +source ssh_privsep.sh + +# Build OpenSSH 7.2 +wget https://mirrors.mit.edu/pub/OpenBSD/OpenSSH/portable/openssh-7.2p1.tar.gz +tar -xzf openssh-7.2p1.tar.gz +cd openssh-7.2p1 +./configure --with-ssl-dir=/usr/local/openssl --with-ssh1 +make -s +make -s install diff --git a/subset/security/sshfaux/ssh_privsep.sh b/subset/security/sshfaux/ssh_privsep.sh new file mode 100644 index 0000000000..2127c62c41 --- /dev/null +++ b/subset/security/sshfaux/ssh_privsep.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# +# Prepare environment for running SSHD with privilege separation +# https://github.com/openssh/openssh-portable/blob/master/README.privsep + +mkdir /etc/ssh +mkdir /var/empty +chown root:sys /var/empty +chmod 755 /var/empty +groupadd sshd +useradd -g sshd -c 'sshd privsep' -d /var/empty -s /bin/false sshd \ No newline at end of file From a16a1e13bdee7728d0da85297fa209db5e301288 Mon Sep 17 00:00:00 2001 From: Nour Date: Mon, 20 Jul 2020 11:32:40 +0100 Subject: [PATCH 38/38] add blank line eof --- subset/security/sshfaux/ssh_privsep.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/subset/security/sshfaux/ssh_privsep.sh b/subset/security/sshfaux/ssh_privsep.sh index 2127c62c41..668d825f9e 100644 --- a/subset/security/sshfaux/ssh_privsep.sh +++ b/subset/security/sshfaux/ssh_privsep.sh @@ -8,4 +8,4 @@ mkdir /var/empty chown root:sys /var/empty chmod 755 /var/empty groupadd sshd -useradd -g sshd -c 'sshd privsep' -d /var/empty -s /bin/false sshd \ No newline at end of file +useradd -g sshd -c 'sshd privsep' -d /var/empty -s /bin/false sshd