diff --git a/.checkstyle.xml b/.checkstyle.xml
index a214cc83f6..469a132853 100644
--- a/.checkstyle.xml
+++ b/.checkstyle.xml
@@ -42,7 +42,7 @@
-
+
diff --git a/.gitignore b/.gitignore
index ada16ef8f5..5e280a7725 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,16 +21,14 @@ validations/
*.save
# Runtime or sub-module files
-inst/
-faucet/
-forch/
-mininet/
-local/
-local_xxx
-local.old
-firebase/.firebaserc
-firebase/.firebase
-firebase/functions/package-lock.json
+/inst/
+/faucet/
+/forch/
+/mininet/
+/local/
+/firebase/.firebaserc
+/firebase/.firebase
+/firebase/functions/package-lock.json
nohup.out
**/node_modules/
.vscode/
diff --git a/.idea/codeStyles/codeStyleConfig.xml b/.idea/codeStyles/codeStyleConfig.xml
new file mode 100644
index 0000000000..b9d18bf599
--- /dev/null
+++ b/.idea/codeStyles/codeStyleConfig.xml
@@ -0,0 +1,5 @@
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
index b8fa9bb7a4..3fc93409e7 100644
--- a/.idea/vcs.xml
+++ b/.idea/vcs.xml
@@ -36,6 +36,10 @@
+
+
+
+
\ No newline at end of file
diff --git a/bin/alt_faucet b/bin/alt_faucet
new file mode 100755
index 0000000000..757ad64218
--- /dev/null
+++ b/bin/alt_faucet
@@ -0,0 +1,24 @@
+#!/bin/bash -e
+
+ROOT=$(realpath $(dirname $0)/..)
+cd $ROOT
+source etc/config_base.sh
+
+if [ -z "$switch_setup_ext_br" ]; then
+ echo switch_setup.ext_br not defined for alternate faucet setup.
+ false
+fi
+
+if [ -z "$switch_setup_alt_port" ]; then
+ echo switch_setup.alt_port not defined for alternate faucet setup.
+ false
+fi
+
+inst_name=$switch_setup_ext_br
+
+inst_dir=inst/faucet/daq-faucet-$inst_name
+mkdir -p $inst_dir
+cp config/faucet/faucet_$inst_name.yaml $inst_dir/faucet.yaml
+echo Launching alternate faucet install $inst_name on $switch_setup_alt_port
+echo DAQ autoclean docker kill daq-faucet-$inst_name
+cmd/faucet $inst_name $switch_setup_alt_port
diff --git a/bin/build_proto b/bin/build_proto
index ad07980e9e..cd20e074ef 100755
--- a/bin/build_proto
+++ b/bin/build_proto
@@ -32,6 +32,11 @@ sha1sum $proto_files > $WEB_ROOT/protos.hash
gen_path=$ROOT/protoc-gen-doc/bin/protoc-gen-doc
+if [ -d venv ]; then
+ echo Entering virtual python environment...
+ source venv/bin/activate
+fi
+
mkdir -p build/daq/proto build/proto
cp $proto_files build/daq/proto/
proto_files2=
@@ -56,3 +61,5 @@ mkdir -p libs/proto/
touch libs/proto/__init__.py
cp build/daq/proto/*.py libs/proto/
cp build/protos.html $WEB_ROOT/
+
+python3 -m grpc_tools.protoc -I usi/src/main/proto/ --python_out=libs/proto/ --grpc_python_out=libs/proto/ usi/src/main/proto/usi.proto
diff --git a/bin/external_ovs b/bin/external_ovs
index a289bd9cff..080fdcdecb 100755
--- a/bin/external_ovs
+++ b/bin/external_ovs
@@ -6,14 +6,19 @@ source etc/config_base.sh
ext_intf=$switch_setup_data_intf
ext_dpid=$switch_setup_of_dpid
-ext_ofpt=$switch_setup_lo_port
ext_brid=$switch_setup_ext_br
ext_brpt=$switch_setup_uplink_port
ext_pri=${ext_intf}
ext_sec=${ext_intf%-pri}-sec
-echo ext_dpid is $ext_dpid
+if [ -z "$switch_setup_alt_port" ]; then
+ ext_ofpt=$switch_setup_lo_port
+else
+ ext_ofpt=$switch_setup_alt_port
+fi
+
+echo ext_dpid is $ext_dpid on port $ext_ofpt
echo network_config is $network_config
dpid=$(printf %016x $ext_dpid)
diff --git a/bin/registrar b/bin/registrar
index e2278951e4..3f941ba188 100755
--- a/bin/registrar
+++ b/bin/registrar
@@ -27,4 +27,4 @@ validator/bin/build > /dev/null
echo Running tools version `git describe`
-validator/bin/registrar $project_id $site_path $schema_path $*
+validator/bin/registrar $project_id $site_path $schema_path $* 2>&1
diff --git a/bin/setup_dev b/bin/setup_dev
index 62223aa9c3..c39518dc96 100755
--- a/bin/setup_dev
+++ b/bin/setup_dev
@@ -75,10 +75,6 @@ $AG install \
python$PVERSION python3-pkg-resources python3-setuptools \
python$PVERSION-dev python3-pip python emacs-nox python$PVERSION-venv
-# Jump through some hoops for mininet, which still has some python2 deps.
-$AG install python-pip
-python2 -m pip install setuptools
-
if [ -d mininet ]; then
echo Checking mininet version matches $MININETV...
targetrev=$(cd mininet; git rev-parse $MININETV)
@@ -137,8 +133,9 @@ $PIP install --upgrade --index-url=https://pypi.python.org/simple Jinja2 \
google-api-core==1.16.0 \
google-cloud-storage==1.16.1 \
google-cloud-firestore==1.6.0 \
- google-cloud-logging==1.14.0
-
+ google-cloud-logging==1.14.0 \
+ grpcio-tools==1.30.0
+
$PIP freeze
echo Resetting .cache directory permissions...
test -n "$USER" && sudo chown $USER -R $HOME/.cache
diff --git a/bin/setup_testing b/bin/setup_testing
deleted file mode 100755
index f5e73ca8b1..0000000000
--- a/bin/setup_testing
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/bash -e
-
-ROOT=$(dirname $0)/..
-cd $ROOT
-
-bin/build_hash check
-
-TARGET_ROOT=inst/faucet/daq-faucet-faucet
-
-for postfix in 1 2; do
- TARGET=${TARGET_ROOT}$postfix
- echo Preparing $TARGET
- sudo rm -rf $TARGET && mkdir -p $TARGET
- cp topology/alta-dev/faucet.yaml $TARGET/faucet.yaml
- cp topology/alta-dev/gauge.yaml $TARGET/gauge.yaml
-done
-
-cmd/faucet faucet1 6655
-cmd/faucet gauge faucet1 6656 9306
-cmd/faucet faucet2 6657
-cmd/faucet gauge faucet2 6658 9308
-
-sudo ip addr flush ganga
-sudo ip addr add 192.0.2.10/24 dev ganga
-
-sudo ovs-vsctl --if-exists del-br upstream -- add-br upstream
-sudo ip link del daqnw || true
-sudo ip link add daqnw type veth peer name t1bond
-sudo ip link set daqnw up
-sudo ip link set t1bond up
-sudo ovs-vsctl add-port upstream daqnw
-
-sudo ip link del up_bond || true
-sudo ip link add up_bond type bond mode 802.3ad lacp_rate fast
-sudo ip link set up_bond up
-sudo ip link set yamuna down
-sudo ip link set yamuna master up_bond
-sudo ip link set beas down
-sudo ip link set beas master up_bond
-sudo ovs-vsctl add-port upstream up_bond
-
-cmd/faux -n :t1bond
-cmd/faux :satlej
-cmd/faux :ravi
-cmd/faux :tapti
-
-echo
-docker exec daq-networking-t1bond ip addr
-echo Waiting for DHCP...
-sleep 30
-echo
-docker exec daq-faux-satlej ip addr show dev satlej
-echo
-docker exec daq-faux-ravi ip addr show dev ravi
-echo
-docker exec daq-faux-tapti ip addr show dev tapti
-echo
-docker exec daq-faux-satlej ping -c 3 google.com
-docker exec daq-faux-ravi ping -c 3 google.com
-docker exec daq-faux-tapti ping -c 3 google.com
-docker exec daq-faux-satlej ping -c 3 daq-faux-tapti
-echo
-echo Done with testing setup.
diff --git a/bin/test_daq b/bin/test_daq
index 279e3bc53b..779cef8998 100755
--- a/bin/test_daq
+++ b/bin/test_daq
@@ -31,6 +31,16 @@ echo -n "DAQ version "
git describe --dirty --always
echo
+TAGGED_VERSION=`cat etc/docker_images.ver`
+if ! git show $TAGGED_VERSION > /dev/null; then
+ echo
+ echo Tagged version $TAGGED_VERSION not found.
+ echo Maybe you need to fetch tags: git fetch --tags.
+ echo If this is on Travis, ensure tags were pushed to your repo.
+ echo
+ false
+fi
+
if [ -d faucet ]; then
echo -n "Last FAUCET commit "
(cd $FAUCET; git log -n 1 --pretty=format:"%h - %an, %ar : %s" || true)
diff --git a/bin/troubleshoot b/bin/troubleshoot
new file mode 100755
index 0000000000..06b3432bd3
--- /dev/null
+++ b/bin/troubleshoot
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+ROOT=$(realpath $(dirname $0)/..)
+cd $ROOT
+
+if [ ! -d inst ]; then
+ echo "Error: run this script after a test run completes"
+ exit 1
+fi
+
+# After the system settles (early on, some dpid=1 messages are expected), repeated
+# 'unknown datapath' entries in the faucet log suggest the DPID is misconfigured.
+unknown_dpid=`fgrep 'unknown datapath' inst/faucet.log | wc -l`
+if [ "$unknown_dpid" -gt 20 ]; then
+ echo "Error: Faucet reports unknown datapath DPID:"
+ fgrep 'unknown datapath' inst/faucet.log | tail -n1
+ echo "Check if switch_setup:of_dpid in config matches the DPID on the physical switch"
+else
+ echo "Checking DPID misconfig: ok"
+fi
+
+# If the switch test failed with a monitoring timeout, switch login info could be wrong
+switch_timeout=`fgrep 'Monitoring timeout for switch' inst/cmdrun.log`
+if [ -n "$switch_timeout" ]; then
+ echo "Error: Timeout connecting to physical switch"
+ echo "Check switch username/password configuration"
+else
+ echo "Checking Switch timeout: ok"
+fi
diff --git a/bin/validate b/bin/validate
index bd13b42612..9789659c11 100755
--- a/bin/validate
+++ b/bin/validate
@@ -20,16 +20,14 @@ if [ -z "$schema_path" ]; then
false
fi
-validator/bin/build
+echo Building validator...
+validator/bin/build > /dev/null
-unset GOOGLE_CLOUD_PROJECT
-export GOOGLE_APPLICATION_CREDENTIALS=$PWD/$gcp_cred
-echo Using credentials from $GOOGLE_APPLICATION_CREDENTIALS
echo Configured topic is $gcp_topic
echo Configured schema is $schema_path
-if [ -n "$site_path" ]; then
- echo Configured site path is $site_path
-fi
+echo Configured site path is $site_path
echo
+echo Running tools version `git describe`
+
validator/bin/validate $schema_path pubsub:$gcp_topic dev $site_path
diff --git a/cmd/build b/cmd/build
index dc428ba90d..834b1078a5 100755
--- a/cmd/build
+++ b/cmd/build
@@ -26,11 +26,12 @@ DOCKER_IMAGE_VER=docker_images.ver
cd $ROOT
source etc/config_base.sh
-host_tests=$host_tests bin/docker_build_files
+echo host_tests=$host_tests
+test_targets=$(host_tests=$host_tests bin/docker_build_files)
function pull_images {
TAG=$1
declare -A test_set
- for target in $(host_tests=$host_tests bin/docker_build_files); do
+ for target in $test_targets; do
target=$(echo $target | sed 's|^.*/Dockerfile.||' | echo daqf/$( /dev/null 2>&1 || service docker start
@@ -81,7 +82,8 @@ sudo rm -f $cleanup_file
function autostart {
tmp=`mktemp`
echo DAQ autostart $@
- eval $@ | tee $tmp
+ eval $@ > $tmp # Don't use "eval $@ | tee $tmp" here; breaks cmd/usi.
+ cat $tmp
grep -e '^\s*DAQ autoclean\s' $tmp >> $cleanup_file || true
}
@@ -99,12 +101,19 @@ if [ -n "$switch_setup_ext_br" ]; then
autostart bin/external_ovs
fi
+if [ -n "$switch_setup_alt_port" ]; then
+ autostart bin/alt_faucet
+fi
+
if [ -n "$switch_setup_model" ]; then
autostart bin/physical_sec
else
echo No external switch model specified.
fi
+docker rm -f daq-usi || true
+autostart cmd/usi
+
# Kill any gateways so that they don't prematurely assign an IP address.
gwids=$(docker ps --format '{{ .Image }} {{ .Names }}' | fgrep daqf/networking | awk '{print $2}') || true
for gwid in $gwids; do
diff --git a/cmd/usi b/cmd/usi
new file mode 100755
index 0000000000..fe39673d89
--- /dev/null
+++ b/cmd/usi
@@ -0,0 +1,18 @@
+#!/bin/bash -e
+TMP_DIR=/tmp/usi
+
+function dump_ovs_interfaces {
+ while true; do
+ sudo ovs-ofctl show sec > $TMP_DIR/ovs_output.txt || true
+ sleep 5
+ done
+}
+
+echo Starting USI
+mkdir -p $TMP_DIR
+dump_ovs_interfaces &
+PID=$!
+docker run -d -v /tmp/usi:/ovs --privileged --network=host --name daq-usi daqf/usi
+
+echo DAQ autoclean docker kill daq-usi
+echo DAQ autoclean kill $PID
diff --git a/config/faucet/faucet_alt-switch.yaml b/config/faucet/faucet_alt-switch.yaml
new file mode 100644
index 0000000000..876cf786fd
--- /dev/null
+++ b/config/faucet/faucet_alt-switch.yaml
@@ -0,0 +1,22 @@
+dps:
+ alt-switch:
+ dp_id: 2
+ interfaces:
+ 1:
+ native_vlan: 1001
+ 2:
+ native_vlan: 1002
+ 3:
+ native_vlan: 1003
+ 4:
+ native_vlan: 1004
+ 5:
+ native_vlan: 1005
+ 100:
+ tagged_vlans: [1001, 1002, 1003, 1004, 1005]
+vlans:
+ 1001:
+ 1002:
+ 1003:
+ 1004:
+ 1005:
diff --git a/config/modules/all.conf b/config/modules/all.conf
index 4d897ae7c5..59b4b74367 100644
--- a/config/modules/all.conf
+++ b/config/modules/all.conf
@@ -7,4 +7,5 @@ include subset/connection/build.conf
include subset/bacnet/build.conf
include subset/security/build.conf
include subset/cloud/build.conf
-include subset/manual/build.conf
\ No newline at end of file
+include subset/manual/build.conf
+include subset/ntp/build.conf
diff --git a/config/modules/host.conf b/config/modules/host.conf
index e6ec5e37ef..cd9e9421b4 100644
--- a/config/modules/host.conf
+++ b/config/modules/host.conf
@@ -12,6 +12,10 @@ add mudgee
# Additional base modules
include subset/pentests/build.conf
+include usi/build.conf
+
+# Extended DHCP tests
+add ipaddr
# Example of how to remove something.
remove unused
diff --git a/config/modules/topo.conf b/config/modules/topo.conf
index 4d47ec6cf9..ba02ce2374 100644
--- a/config/modules/topo.conf
+++ b/config/modules/topo.conf
@@ -3,3 +3,4 @@ build docker/modules
# Use ping with runtime configuration for topo testing.
add ping
+include usi/build.conf
diff --git a/config/system/all.conf b/config/system/all.conf
index 0fe77da8fb..612cdc89cb 100644
--- a/config/system/all.conf
+++ b/config/system/all.conf
@@ -3,7 +3,7 @@
# Load defaults.
source config/system/default.yaml
-# Description description for dashboard.
+# Description for dashboard.
site_description="Multi-Device All-Tests Configuration"
# Upstream dataplane port from the external (secondary) switch.
diff --git a/config/system/alt.yaml b/config/system/alt.yaml
new file mode 100644
index 0000000000..e429326c7f
--- /dev/null
+++ b/config/system/alt.yaml
@@ -0,0 +1,20 @@
+# Example configuration file for using an OVS switch not managed by DAQ.
+
+# Load defaults.
+include: config/system/default.yaml
+
+# Description for dashboard.
+site_description: "Alternate (not managed by DAQ) OVS switch configuration"
+
+# Network switch configuration.
+switch_setup:
+ data_intf: alt-intf
+ alt_port: 6669
+ uplink_port: 100
+ ext_br: alt-switch
+
+# Faux device connection for testing.
+interfaces:
+ faux:
+ opts:
+ port: 2
diff --git a/config/system/default.yaml b/config/system/default.yaml
index 644993b96e..67daa36ba0 100644
--- a/config/system/default.yaml
+++ b/config/system/default.yaml
@@ -37,3 +37,7 @@ long_dhcp_response_sec: 105
# finish hook: executed at the end of every test
finish_hook: bin/dump_network
+
+# USI URL for DAQ to connect to
+usi_setup:
+ url: localhost:5000
diff --git a/config/system/ext.conf b/config/system/ext.conf
index 8625109734..28dc34b707 100644
--- a/config/system/ext.conf
+++ b/config/system/ext.conf
@@ -3,7 +3,7 @@
# Load defaults.
source config/system/default.yaml
-# Description description for dashboard.
+# Description for dashboard.
site_description="External (not integrated with DAQ) OVS switch configuration"
# Network switch configuration.
diff --git a/config/system/ext.yaml b/config/system/ext.yaml
index 7ad626341e..4fef079c1e 100644
--- a/config/system/ext.yaml
+++ b/config/system/ext.yaml
@@ -3,7 +3,7 @@
# Load defaults.
include: config/system/default.yaml
-# Description description for dashboard.
+# Description for dashboard.
site_description: "External (not integrated with DAQ) OVS switch configuration"
# Network switch configuration.
diff --git a/config/system/muddy.conf b/config/system/muddy.conf
index 6510e2b113..3d3a17b30c 100644
--- a/config/system/muddy.conf
+++ b/config/system/muddy.conf
@@ -3,7 +3,7 @@
# Load defaults.
source config/system/default.yaml
-# Description description for dashboard.
+# Description for dashboard.
site_description="Multi-Device Configuration"
# Upstream dataplane port from the external (secondary) switch.
diff --git a/config/system/multi.conf b/config/system/multi.conf
index 185bbc40df..367a94e86b 100644
--- a/config/system/multi.conf
+++ b/config/system/multi.conf
@@ -3,7 +3,7 @@
# Load defaults.
source config/system/default.yaml
-# Description description for dashboard.
+# Description for dashboard.
site_description="Multi-Device Configuration"
# Upstream dataplane port from the external (secondary) switch.
diff --git a/daq/docker_test.py b/daq/docker_test.py
index fdabebd0f7..c38b210b8d 100644
--- a/daq/docker_test.py
+++ b/daq/docker_test.py
@@ -19,18 +19,17 @@ class DockerTest:
CONTAINER_PREFIX = 'daq'
# pylint: disable=too-many-arguments
- def __init__(self, runner, target_port, tmpdir, test_name, env_vars=None):
+ def __init__(self, host, target_port, tmpdir, test_name, module_config):
self.target_port = target_port
self.tmpdir = tmpdir
self.test_name = test_name
- self.runner = runner
+ self.runner = host.runner
self.host_name = '%s%02d' % (test_name, self.target_port)
self.docker_log = None
self.docker_host = None
self.callback = None
self.start_time = None
self.pipe = None
- self.env_vars = env_vars or []
self._finish_hook = None
def start(self, port, params, callback, finish_hook):
@@ -44,7 +43,7 @@ def start(self, port, params, callback, finish_hook):
def opt_param(key):
return params.get(key) or '' # Substitute empty string for None
- env_vars = self.env_vars + [
+ env_vars = [
"TARGET_NAME=" + self.host_name,
"TARGET_IP=" + params['target_ip'],
"TARGET_MAC=" + params['target_mac'],
@@ -168,3 +167,6 @@ def _docker_complete(self):
LOGGER.info("Target port %d test %s passed %ss",
self.target_port, self.test_name, delay)
self.callback(return_code=return_code, exception=exception)
+
+ def ip_listener(self, target_ip):
+    """Do nothing because docker tests don't care about IP notifications."""
diff --git a/daq/gateway.py b/daq/gateway.py
index c7c54182a5..4ee9bc2fd1 100644
--- a/daq/gateway.py
+++ b/daq/gateway.py
@@ -12,6 +12,7 @@
LOGGER = logger.get_logger('gateway')
+
class Gateway():
"""Gateway collection class for managing testing services"""
@@ -37,8 +38,8 @@ def __init__(self, runner, name, port_set, network):
self.dummy = None
self.tmpdir = None
self.targets = {}
- self.test_ports = {}
- self.ready = {}
+ self.test_ports = set()
+ self.ready = set()
self.activated = False
self.result_linger = False
self._scan_monitor = None
@@ -125,6 +126,18 @@ def request_new_ip(self, mac):
"""Requests a new ip for the device"""
self.execute_script('new_ip', mac)
+ def change_dhcp_response_time(self, mac, time):
+ """Change dhcp response time for device mac"""
+ self.execute_script('change_dhcp_response_time', mac, time)
+
+ def stop_dhcp_response(self, mac):
+ """Stops DHCP response for the device"""
+ self.change_dhcp_response_time(mac, -1)
+
+ def change_dhcp_range(self, start, end, prefix_length):
+ """Change dhcp range for devices"""
+ self.execute_script('change_dhcp_range', start, end, prefix_length)
+
def allocate_test_port(self):
"""Get the test port to use for this gateway setup"""
test_port = self._switch_port(self.TEST_OFFSET_START)
@@ -132,7 +145,7 @@ def allocate_test_port(self):
test_port = test_port + 1
limit_port = self._switch_port(self.NUM_SET_PORTS)
assert test_port < limit_port, 'no test ports available'
- self.test_ports[test_port] = True
+ self.test_ports.add(test_port)
return test_port
def _startup_scan(self, host):
@@ -160,7 +173,7 @@ def _scan_error(self, e):
def release_test_port(self, test_port):
"""Release the given port from the gateway"""
assert test_port in self.test_ports, 'test port not allocated'
- del self.test_ports[test_port]
+ self.test_ports.remove(test_port)
def _switch_port(self, offset):
return self.port_set * self.SET_SPACING + offset
@@ -207,7 +220,7 @@ def target_ready(self, target_mac):
"""Mark a target ready, and return set of ready targets"""
if not target_mac in self.ready:
LOGGER.info('Ready target %s from gateway group %s', target_mac, self.name)
- self.ready[target_mac] = True
+ self.ready.add(target_mac)
return self.ready
def get_targets(self):
diff --git a/daq/gcp.py b/daq/gcp.py
index 17051870a1..67db631e0b 100644
--- a/daq/gcp.py
+++ b/daq/gcp.py
@@ -24,6 +24,7 @@
# pylint: disable=no-member
DESCENDING = firestore.Query.DESCENDING
+
def get_timestamp():
""""Get a JSON-compatible formatted timestamp"""
return to_timestamp(datetime.datetime.now(datetime.timezone.utc))
@@ -49,7 +50,7 @@ def __init__(self, config, callback_handler):
self._callback_handler = callback_handler
cred_file = self.config.get('gcp_cred')
if not cred_file:
- LOGGER.info('No gcp_cred filr specified in config, disabling gcp use.')
+ LOGGER.info('No gcp_cred file specified in config, disabling gcp use.')
self._pubber = None
self._storage = None
self._firestore = None
diff --git a/daq/host.py b/daq/host.py
index dc13f9b659..ee1e1a7e21 100644
--- a/daq/host.py
+++ b/daq/host.py
@@ -5,17 +5,20 @@
import shutil
import time
from datetime import timedelta, datetime
+import grpc
from clib import tcpdump_helper
+
from report import ResultType, ReportGenerator
+from proto import usi_pb2 as usi
+from proto import usi_pb2_grpc as usi_service
import configurator
import docker_test
import gcp
+import ipaddr_test
import logger
-LOGGER = logger.get_logger('host')
-
class _STATE:
"""Host state enum for testing cycle"""
@@ -46,15 +49,21 @@ class MODE:
LONG = 'long'
MERR = 'merr'
+
def pre_states():
"""Return pre-test states for basic operation"""
- return ['startup', 'sanity', 'ipaddr', 'base', 'monitor']
+ return ['startup', 'sanity', 'acquire', 'base', 'monitor']
def post_states():
"""Return post-test states for recording finalization"""
return ['finish', 'info', 'timer']
+def get_test_config(config, test):
+ """Get a single test module's config"""
+ return config["modules"].get(test)
+
+
class ConnectedHost:
"""Class managing a device-under-test"""
@@ -68,6 +77,7 @@ class ConnectedHost:
_CONFIG_DIR = "config/"
_TIMEOUT_EXCEPTION = TimeoutError('Timeout expired')
+ # pylint: disable=too-many-statements
def __init__(self, runner, gateway, target, config):
self.configurator = configurator.Configurator()
self.runner = runner
@@ -81,6 +91,7 @@ def __init__(self, runner, gateway, target, config):
self.devdir = self._init_devdir()
self.run_id = self.make_runid()
self.scan_base = os.path.abspath(os.path.join(self.devdir, 'scans'))
+ self.logger = logger.get_logger('host%s' % self.target_port)
self._port_base = self._get_port_base()
self._device_base = self._get_device_base()
self.state = None
@@ -96,6 +107,7 @@ def __init__(self, runner, gateway, target, config):
_default_timeout_sec = int(config.get('default_timeout_sec', 0))
self._default_timeout_sec = _default_timeout_sec if _default_timeout_sec else None
self._finish_hook_script = config.get('finish_hook')
+ self._usi_url = config.get('usi_setup', {}).get('url')
self._mirror_intf_name = None
self._monitor_ref = None
self._monitor_start = None
@@ -106,7 +118,8 @@ def __init__(self, runner, gateway, target, config):
assert self._loaded_config, 'config was not loaded'
self._write_module_config(self._loaded_config, self._device_aux_path())
self.remaining_tests = self._get_enabled_tests()
- LOGGER.info('Host %s running with enabled tests %s', self.target_port, self.remaining_tests)
+ self.logger.info('Host %s running with enabled tests %s', self.target_port,
+ self.remaining_tests)
self._report = ReportGenerator(config, self._INST_DIR, self.target_mac,
self._loaded_config)
self.record_result('startup', state=MODE.PREP)
@@ -115,6 +128,7 @@ def __init__(self, runner, gateway, target, config):
self._startup_file = None
self.timeout_handler = self._aux_module_timeout_handler
self._all_ips = []
+ self._ip_listener = None
@staticmethod
def make_runid():
@@ -133,7 +147,7 @@ def _get_port_base(self):
return None
conf_base = os.path.abspath(os.path.join(test_config, 'port-%02d' % self.target_port))
if not os.path.isdir(conf_base):
- LOGGER.warning('Test config directory not found: %s', conf_base)
+ self.logger.warning('Test config directory not found: %s', conf_base)
return None
return conf_base
@@ -148,22 +162,25 @@ def _make_control_bundle(self):
'paused': self.state == _STATE.READY
}
+ def _get_test_config(self, test):
+ return get_test_config(self._loaded_config, test)
+
def _test_enabled(self, test):
fallback_config = {'enabled': test in self._CORE_TESTS}
- test_config = self._loaded_config['modules'].get(test, fallback_config)
+ test_config = self._get_test_config(test) or fallback_config
return test_config.get('enabled', True)
def _get_test_timeout(self, test):
- test_module = self._loaded_config['modules'].get(test)
if test == 'hold':
return None
+ test_module = self._get_test_config(test)
if not test_module:
return self._default_timeout_sec
return test_module.get('timeout_sec', self._default_timeout_sec)
def get_port_flap_timeout(self, test):
"""Get port toggle timeout configuration that's specific to each test module"""
- test_module = self._loaded_config['modules'].get(test)
+ test_module = self._get_test_config(test)
if not test_module:
return None
return test_module.get('port_flap_timeout_sec')
@@ -204,7 +221,7 @@ def _type_path(self):
device_type = dev_config.get('device_type')
if not device_type:
return None
- LOGGER.info('Configuring device %s as type %s', self.target_mac, device_type)
+ self.logger.info('Configuring device %s as type %s', self.target_mac, device_type)
site_path = self.config.get('site_path')
type_path = os.path.abspath(os.path.join(site_path, 'device_types', device_type))
return type_path
@@ -215,20 +232,20 @@ def _type_aux_path(self):
return None
aux_path = os.path.join(type_path, self._AUX_DIR)
if not os.path.exists(aux_path):
- LOGGER.info('Skipping missing type dir %s', aux_path)
+ self.logger.info('Skipping missing type dir %s', aux_path)
return None
return aux_path
def _create_device_dir(self, path):
- LOGGER.warning('Creating new device dir: %s', path)
+ self.logger.warning('Creating new device dir: %s', path)
os.makedirs(path)
template_dir = self.config.get('device_template')
if not template_dir:
- LOGGER.warning('Skipping defaults since no device_template found')
+ self.logger.warning('Skipping defaults since no device_template found')
return
- LOGGER.info('Copying template files from %s to %s', template_dir, path)
+ self.logger.info('Copying template files from %s to %s', template_dir, path)
for file in os.listdir(template_dir):
- LOGGER.info('Copying %s...', file)
+ self.logger.info('Copying %s...', file)
shutil.copy(os.path.join(template_dir, file), path)
def _upload_file(self, path):
@@ -237,7 +254,7 @@ def _upload_file(self, path):
def initialize(self):
"""Fully initialize a new host set"""
- LOGGER.info('Target port %d initializing...', self.target_port)
+ self.logger.info('Target port %d initializing...', self.target_port)
# There is a race condition here with ovs assigning ports, so wait a bit.
time.sleep(2)
shutil.rmtree(self.devdir, ignore_errors=True)
@@ -268,9 +285,27 @@ def _state_transition(self, target, expected=None):
message = 'state was %s expected %s' % (self.state, expected)
assert self.state == expected, message
assert self.state != _STATE.TERM, 'host already terminated'
- LOGGER.debug('Target port %d state: %s -> %s', self.target_port, self.state, target)
+ self.logger.debug('Target port %d state: %s -> %s', self.target_port, self.state, target)
self.state = target
+ def _build_switch_info(self) -> usi.SwitchInfo:
+ switch_config = self._get_switch_config()
+ model_str = switch_config['model']
+ if model_str == 'FAUX_SWITCH':
+ return None
+ if model_str:
+ switch_model = usi.SwitchModel.Value(model_str)
+ else:
+ switch_model = usi.SwitchModel.OVS_SWITCH
+ params = {
+ "ip_addr": switch_config["ip"],
+ "device_port": self.target_port,
+ "model": switch_model,
+ "username": switch_config["username"],
+ "password": switch_config["password"]
+ }
+ return usi.SwitchInfo(**params)
+
def is_running(self):
"""Return True if this host is running active test."""
return self.state != _STATE.ERROR and self.state != _STATE.DONE
@@ -285,14 +320,34 @@ def notify_activate(self):
self._record_result('startup', state=MODE.HOLD)
return self.state == _STATE.WAITING
+ def connect_port(self, connect):
+ """Connects/Disconnects port for this host"""
+ switch_info = self._build_switch_info()
+ if not switch_info:
+ self.logger.info('No switch model found, skipping port connect')
+ return False
+ try:
+ with grpc.insecure_channel(self._usi_url) as channel:
+ stub = usi_service.USIServiceStub(channel)
+ if connect:
+ res = stub.connect(switch_info)
+ else:
+ res = stub.disconnect(switch_info)
+ self.logger.info('Target port %s %s successful? %s', self.target_port, "connect"
+ if connect else "disconnect", res.success)
+ except Exception as e:
+ self.logger.error(e)
+ raise e
+ return True
+
def _prepare(self):
- LOGGER.info('Target port %d waiting for ip as %s', self.target_port, self.target_mac)
+ self.logger.info('Target port %d waiting for ip as %s', self.target_port, self.target_mac)
self._state_transition(_STATE.WAITING, _STATE.INIT)
self.record_result('sanity', state=MODE.DONE)
- self.record_result('ipaddr', state=MODE.EXEC)
+ self.record_result('acquire', state=MODE.EXEC)
static_ip = self._get_static_ip()
if static_ip:
- LOGGER.info('Target port %d using static ip', self.target_port)
+ self.logger.info('Target port %d using static ip', self.target_port)
time.sleep(self._STARTUP_MIN_TIME_SEC)
self.runner.ip_notify(MODE.NOPE, {
'mac': self.target_mac,
@@ -304,9 +359,9 @@ def _prepare(self):
# enables dhcp response for this device
wait_time = self.runner.config.get("long_dhcp_response_sec") \
if dhcp_mode == 'long_response' else 0
- LOGGER.info('Target port %d using %s DHCP mode, wait %s',
- self.target_port, dhcp_mode, wait_time)
- self.gateway.execute_script('change_dhcp_response_time', self.target_mac, wait_time)
+ self.logger.info('Target port %d using %s DHCP mode, wait %s',
+ self.target_port, dhcp_mode, wait_time)
+ self.gateway.change_dhcp_response_time(self.target_mac, wait_time)
_ = [listener(self) for listener in self._dhcp_listeners]
def _aux_module_timeout_handler(self):
@@ -315,7 +370,7 @@ def _aux_module_timeout_handler(self):
def _main_module_timeout_handler(self):
self.test_host.terminate()
- self._docker_callback(exception=self._TIMEOUT_EXCEPTION)
+ self._module_callback(exception=self._TIMEOUT_EXCEPTION)
def heartbeat(self):
"""Checks module run time for each event loop"""
@@ -326,7 +381,8 @@ def heartbeat(self):
nowtime = gcp.parse_timestamp(gcp.get_timestamp())
if nowtime >= timeout:
if self.timeout_handler:
- LOGGER.error('Monitoring timeout for %s after %ds', self.test_name, timeout_sec)
+ self.logger.error('Monitoring timeout for %s after %ds', self.test_name,
+ timeout_sec)
# ensure it's called once
handler, self.timeout_handler = self.timeout_handler, None
handler()
@@ -340,15 +396,15 @@ def _finalize_report(self):
report_paths = self._report.finalize()
if self._trigger_path:
report_paths.update({'trigger_path': self._trigger_path})
- LOGGER.info('Finalized with reports %s', list(report_paths.keys()))
+ self.logger.info('Finalized with reports %s', list(report_paths.keys()))
report_blobs = {name: self._upload_file(path) for name, path in report_paths.items()}
self.record_result('terminate', state=MODE.TERM, **report_blobs)
self._report = None
def terminate(self, reason, trigger=True):
"""Terminate this host"""
- LOGGER.info('Target port %d terminate, running %s, trigger %s: %s', self.target_port,
- self._host_name(), trigger, reason)
+ self.logger.info('Target port %d terminate, running %s, trigger %s: %s', self.target_port,
+ self._host_name(), trigger, reason)
self._state_transition(_STATE.TERM)
self._release_config()
self._monitor_cleanup()
@@ -360,8 +416,8 @@ def terminate(self, reason, trigger=True):
self.test_host = None
self.timeout_handler = None
except Exception as e:
- LOGGER.error('Target port %d terminating test: %s', self.target_port, e)
- LOGGER.exception(e)
+ self.logger.error('Target port %d terminating test: %s', self.target_port, e)
+ self.logger.exception(e)
if trigger:
self.runner.target_set_complete(self.target_port,
'Target port %d termination: %s' % (
@@ -380,8 +436,11 @@ def ip_notify(self, target_ip, state=MODE.DONE, delta_sec=-1):
with open(self._trigger_path, 'a') as output_stream:
output_stream.write('%s %s %d\n' % (target_ip, state, delta_sec))
self._all_ips.append({"ip": target_ip, "timestamp": time.time()})
- if self._get_dhcp_mode() == "ip_change" and len(self._all_ips) == 1:
- self.gateway.request_new_ip(self.target_mac)
+ # Update ip directly if it's already triggered.
+ if self.target_ip:
+ self.target_ip = target_ip
+ if self.test_host:
+ self.test_host.ip_listener(target_ip)
def trigger_ready(self):
"""Check if this host is ready to be triggered"""
@@ -397,41 +456,41 @@ def trigger_ready(self):
def trigger(self, state=MODE.DONE, target_ip=None, exception=None, delta_sec=-1):
"""Handle device trigger"""
if not self.target_ip and not self.trigger_ready():
- LOGGER.warn('Target port %d ignoring premature trigger', self.target_port)
+ self.logger.warn('Target port %d ignoring premature trigger', self.target_port)
return False
if self.target_ip:
- LOGGER.debug('Target port %d already triggered', self.target_port)
+ self.logger.debug('Target port %d already triggered', self.target_port)
assert self.target_ip == target_ip, "target_ip mismatch"
return True
self.target_ip = target_ip
self._record_result('info', state='%s/%s' % (self.target_mac, target_ip))
- self.record_result('ipaddr', ip=target_ip, state=state, exception=exception)
+ self.record_result('acquire', ip=target_ip, state=state, exception=exception)
if exception:
self._state_transition(_STATE.ERROR)
self.runner.target_set_error(self.target_port, exception)
else:
- LOGGER.info('Target port %d triggered as %s', self.target_port, target_ip)
+ self.logger.info('Target port %d triggered as %s', self.target_port, target_ip)
self._state_transition(_STATE.BASE, _STATE.WAITING)
return True
def _ping_test(self, src, dst, src_addr=None):
if not src or not dst:
- LOGGER.error('Invalid ping test params, src=%s, dst=%s', src, dst)
+ self.logger.error('Invalid ping test params, src=%s, dst=%s', src, dst)
return False
return self.runner.ping_test(src, dst, src_addr=src_addr)
def _startup_scan(self):
self._startup_file = os.path.join(self.scan_base, 'startup.pcap')
self._startup_time = datetime.now()
- LOGGER.info('Target port %d startup pcap capture', self.target_port)
+ self.logger.info('Target port %d startup pcap capture', self.target_port)
self._monitor_scan(self._startup_file)
def _monitor_scan(self, output_file, timeout=None):
assert not self._monitor_ref, 'tcp_monitor already active'
network = self.runner.network
tcp_filter = ''
- LOGGER.info('Target port %d pcap intf %s for %ss output in %s',
- self.target_port, self._mirror_intf_name, timeout, output_file)
+ self.logger.info('Target port %d pcap intf %s for %ss output in %s',
+ self.target_port, self._mirror_intf_name, timeout, output_file)
helper = tcpdump_helper.TcpdumpHelper(network.pri, tcp_filter, packets=None,
intf_name=self._mirror_intf_name,
timeout=timeout, pcap_out=output_file,
@@ -447,10 +506,10 @@ def _base_start(self):
success = self._base_tests()
self._monitor_cleanup()
if not success:
- LOGGER.warning('Target port %d base tests failed', self.target_port)
+ self.logger.warning('Target port %d base tests failed', self.target_port)
self._state_transition(_STATE.ERROR)
return
- LOGGER.info('Target port %d done with base.', self.target_port)
+ self.logger.info('Target port %d done with base.', self.target_port)
self._background_scan()
except Exception as e:
self._monitor_cleanup()
@@ -458,7 +517,7 @@ def _base_start(self):
def _monitor_cleanup(self, forget=True):
if self._monitor_ref:
- LOGGER.info('Target port %d network pcap complete', self.target_port)
+ self.logger.info('Target port %d network pcap complete', self.target_port)
active = self._monitor_ref.stream() and not self._monitor_ref.stream().closed
assert active == forget, 'forget and active mismatch'
self._upload_file(self._startup_file)
@@ -468,7 +527,7 @@ def _monitor_cleanup(self, forget=True):
self._monitor_ref = None
def _monitor_error(self, exception, forget=False):
- LOGGER.error('Target port %d monitor error: %s', self.target_port, exception)
+ self.logger.error('Target port %d monitor error: %s', self.target_port, exception)
self._monitor_cleanup(forget=forget)
self.record_result(self.test_name, exception=exception)
self._state_transition(_STATE.ERROR)
@@ -477,13 +536,13 @@ def _monitor_error(self, exception, forget=False):
def _background_scan(self):
self._state_transition(_STATE.MONITOR, _STATE.BASE)
if not self._monitor_scan_sec:
- LOGGER.info('Target port %d skipping background pcap', self.target_port)
+ self.logger.info('Target port %d skipping background pcap', self.target_port)
self._monitor_continue()
return
self.record_result('monitor', time=self._monitor_scan_sec, state=MODE.EXEC)
monitor_file = os.path.join(self.scan_base, 'monitor.pcap')
- LOGGER.info('Target port %d background pcap for %ds',
- self.target_port, self._monitor_scan_sec)
+ self.logger.info('Target port %d background pcap for %ds',
+ self.target_port, self._monitor_scan_sec)
self._monitor_scan(monitor_file, timeout=self._monitor_scan_sec)
def _monitor_timeout(self, timeout):
@@ -494,19 +553,20 @@ def _monitor_timeout(self, timeout):
self._monitor_complete()
def _monitor_complete(self):
- LOGGER.info('Target port %d pcap complete', self.target_port)
+ self.logger.info('Target port %d pcap complete', self.target_port)
self._monitor_cleanup(forget=False)
self.record_result('monitor', state=MODE.DONE)
self._monitor_continue()
def _monitor_continue(self):
self._state_transition(_STATE.NEXT, _STATE.MONITOR)
+ self.test_name = None
self._run_next_test()
def _base_tests(self):
self.record_result('base', state=MODE.EXEC)
if not self._ping_test(self.gateway.host, self.target_ip):
- LOGGER.debug('Target port %d warmup ping failed', self.target_port)
+ self.logger.debug('Target port %d warmup ping failed', self.target_port)
try:
success1 = self._ping_test(self.gateway.host, self.target_ip), 'simple ping failed'
success2 = self._ping_test(self.gateway.host, self.target_ip,
@@ -521,20 +581,19 @@ def _base_tests(self):
return True
def _run_next_test(self):
+ assert not self.test_name, 'test_name defined: %s' % self.test_name
try:
if self.remaining_tests:
- LOGGER.debug('Target port %d executing tests %s',
- self.target_port, self.remaining_tests)
- self.timeout_handler = self._main_module_timeout_handler
- self._docker_test(self.remaining_tests.pop(0))
+ self.logger.debug('Target port %d executing tests %s',
+ self.target_port, self.remaining_tests)
+ self._run_test(self.remaining_tests.pop(0))
else:
- LOGGER.info('Target port %d no more tests remaining', self.target_port)
+ self.logger.info('Target port %d no more tests remaining', self.target_port)
self.timeout_handler = self._aux_module_timeout_handler
self._state_transition(_STATE.DONE, _STATE.NEXT)
- self.test_name = None
self.record_result('finish', state=MODE.FINE)
except Exception as e:
- LOGGER.error('Target port %d start error: %s', self.target_port, e)
+ self.logger.error('Target port %d start error: %s', self.target_port, e)
self._state_transition(_STATE.ERROR)
self.runner.target_set_error(self.target_port, e)
@@ -547,12 +606,15 @@ def _device_aux_path(self):
os.makedirs(path)
return path
- def _docker_test(self, test_name):
- self.test_name = test_name
- self.test_start = gcp.get_timestamp()
- self.test_host = docker_test.DockerTest(self.runner, self.target_port,
- self.devdir, test_name)
- LOGGER.debug('test_host start %s/%s', test_name, self._host_name())
+ def _new_test(self, test_name):
+ clazz = ipaddr_test.IpAddrTest if test_name == 'ipaddr' else docker_test.DockerTest
+ return clazz(self, self.target_port, self.devdir, test_name, self._loaded_config)
+
+ def _run_test(self, test_name):
+ self.timeout_handler = self._main_module_timeout_handler
+ self.test_host = self._new_test(test_name)
+
+ self.logger.info('Target port %d start %s', self.target_port, self._host_name())
try:
self.test_port = self.runner.allocate_test_port(self.target_port)
@@ -561,7 +623,9 @@ def _docker_test(self, test_name):
raise e
try:
- self._start_test_host()
+ self._start_test(test_name)
+ params = self._get_module_params()
+ self.test_host.start(self.test_port, params, self._module_callback, self._finish_hook)
except Exception as e:
self.test_host = None
self.runner.release_test_port(self.target_port, self.test_port)
@@ -569,14 +633,34 @@ def _docker_test(self, test_name):
self._monitor_cleanup()
raise e
- def _start_test_host(self):
- params = self._get_module_params()
+ def _start_test(self, test_name):
+ self.test_name = test_name
+ self.test_start = gcp.get_timestamp()
self._write_module_config(self._loaded_config, self._host_tmp_path())
self._record_result(self.test_name, config=self._loaded_config, state=MODE.CONF)
self.record_result(self.test_name, state=MODE.EXEC)
self._monitor_scan(os.path.join(self.scan_base, 'test_%s.pcap' % self.test_name))
self._state_transition(_STATE.TESTING, _STATE.NEXT)
- self.test_host.start(self.test_port, params, self._docker_callback, self._finish_hook)
+
+ def _end_test(self, state=MODE.DONE, return_code=None, exception=None):
+ self._monitor_cleanup()
+ self._state_transition(_STATE.NEXT, _STATE.TESTING)
+ report_path = os.path.join(self._host_tmp_path(), 'report.txt')
+ activation_log_path = os.path.join(self._host_dir_path(), 'activate.log')
+ module_config_path = os.path.join(self._host_tmp_path(), self._MODULE_CONFIG)
+ remote_paths = {}
+ for result_type, path in ((ResultType.REPORT_PATH, report_path),
+ (ResultType.ACTIVATION_LOG_PATH, activation_log_path),
+ (ResultType.MODULE_CONFIG_PATH, module_config_path)):
+ if os.path.isfile(path):
+ self._report.accumulate(self.test_name, {result_type: path})
+ remote_paths[result_type.value] = self._upload_file(path)
+ self.record_result(self.test_name, state=state, code=return_code, exception=exception,
+ **remote_paths)
+ self.test_name = None
+ self.test_host = None
+ self.timeout_handler = None
+ self._run_next_test()
def _get_module_params(self):
switch_setup = self.switch_setup if 'mods_addr' in self.switch_setup else None
@@ -620,35 +704,19 @@ def _finish_hook(self):
finish_dir = os.path.join(self.devdir, 'finish', self._host_name())
shutil.rmtree(finish_dir, ignore_errors=True)
os.makedirs(finish_dir)
- LOGGER.info('Executing finish_hook: %s %s', self._finish_hook_script, finish_dir)
+ self.logger.info('Executing finish_hook: %s %s', self._finish_hook_script, finish_dir)
os.system('%s %s 2>&1 > %s/finish.out' %
(self._finish_hook_script, finish_dir, finish_dir))
- def _docker_callback(self, return_code=None, exception=None):
+ def _module_callback(self, return_code=None, exception=None):
host_name = self._host_name()
- LOGGER.info('Host callback %s/%s was %s with %s',
- self.test_name, host_name, return_code, exception)
- self._monitor_cleanup()
+ self.logger.info('Host callback %s/%s was %s with %s',
+ self.test_name, host_name, return_code, exception)
failed = return_code or exception
state = MODE.MERR if failed else MODE.DONE
- report_path = os.path.join(self._host_tmp_path(), 'report.txt')
- activation_log_path = os.path.join(self._host_dir_path(), 'activate.log')
- module_config_path = os.path.join(self._host_tmp_path(), self._MODULE_CONFIG)
- remote_paths = {}
- for result_type, path in ((ResultType.REPORT_PATH, report_path),
- (ResultType.ACTIVATION_LOG_PATH, activation_log_path),
- (ResultType.MODULE_CONFIG_PATH, module_config_path)):
- if os.path.isfile(path):
- self._report.accumulate(self.test_name, {result_type: path})
- remote_paths[result_type.value] = self._upload_file(path)
- self.record_result(self.test_name, state=state, code=return_code, exception=exception,
- **remote_paths)
self.runner.release_test_port(self.target_port, self.test_port)
- self._state_transition(_STATE.NEXT, _STATE.TESTING)
- assert self.test_host, '_docker_callback with no test_host defined'
- self.test_host = None
- self.timeout_handler = None
- self._run_next_test()
+ assert self.test_host, '_module_callback with no test_host defined'
+ self._end_test(state=state, return_code=return_code, exception=exception)
def _merge_run_info(self, config):
config['run_info'] = {
@@ -672,8 +740,8 @@ def record_result(self, name, **kwargs):
"""Record a named result for this test"""
current = gcp.get_timestamp()
if name != self.test_name:
- LOGGER.debug('Target port %d report %s start %s',
- self.target_port, name, current)
+ self.logger.debug('Target port %d report %s start %s',
+ self.target_port, name, current)
self.test_name = name
self.test_start = current
if name:
@@ -709,12 +777,12 @@ def _exception_message(self, exception):
return str(exception)
def _control_updated(self, control_config):
- LOGGER.info('Updated control config: %s %s', self.target_mac, control_config)
+ self.logger.info('Updated control config: %s %s', self.target_mac, control_config)
paused = control_config.get('paused')
if not paused and self.is_ready():
self._start_run()
elif paused and not self.is_ready():
- LOGGER.warning('Inconsistent control state for update of %s', self.target_mac)
+ self.logger.warning('Inconsistent control state for update of %s', self.target_mac)
def reload_config(self):
"""Trigger a config reload due to an external config change."""
@@ -723,12 +791,12 @@ def reload_config(self):
if device_ready:
self._loaded_config = new_config
config_bundle = self._make_config_bundle(new_config)
- LOGGER.info('Device config reloaded: %s %s', device_ready, self.target_mac)
+ self.logger.info('Device config reloaded: %s %s', device_ready, self.target_mac)
self._record_result(None, run_info=device_ready, config=config_bundle)
return new_config
def _dev_config_updated(self, dev_config):
- LOGGER.info('Device config update: %s %s', self.target_mac, dev_config)
+ self.logger.info('Device config update: %s %s', self.target_mac, dev_config)
self._write_module_config(dev_config, self._device_base)
self.reload_config()
diff --git a/daq/ipaddr_test.py b/daq/ipaddr_test.py
new file mode 100644
index 0000000000..451192f75d
--- /dev/null
+++ b/daq/ipaddr_test.py
@@ -0,0 +1,91 @@
+"""Test module encapsulating ip-address tests (including DHCP)"""
+
+from __future__ import absolute_import
+import time
+import os
+import copy
+import logger
+
+LOGGER = logger.get_logger('ipaddr')
+
+
+class IpAddrTest:
+ """Module for inline ipaddr tests"""
+
+ # pylint: disable=too-many-arguments
+ def __init__(self, host, target_port, tmpdir, test_name, module_config):
+ self.host = host
+ self.target_port = target_port
+ self.tmpdir = tmpdir
+ self.test_config = module_config.get('modules').get('ipaddr')
+ self.test_dhcp_ranges = copy.copy(self.test_config.get('dhcp_ranges', []))
+ self.test_name = test_name
+ self.host_name = '%s%02d' % (test_name, self.target_port)
+ self.log_path = os.path.join(self.tmpdir, 'nodes', self.host_name, 'activate.log')
+ self.log_file = None
+ self.callback = None
+ self._ip_callback = None
+ self.tests = [
+ ('dhcp port_toggle test', self._dhcp_port_toggle_test),
+ ('dhcp multi subnet test', self._multi_subnet_test),
+ ('ip change test', self._ip_change_test),
+ ('finalize', self._finalize)
+ ]
+
+ def start(self, port, params, callback, finish_hook):
+ """Start the ip-addr tests"""
+ self.callback = callback
+ LOGGER.debug('Target port %d starting ipaddr test %s', self.target_port, self.test_name)
+ self.log_file = open(self.log_path, 'w')
+ self._next_test()
+
+ def _next_test(self):
+ try:
+ name, func = self.tests.pop(0)
+ self.log('Running ' + name)
+ func()
+ except Exception as e:
+ self.log(str(e))
+ self._finalize(exception=e)
+
+ def log(self, message):
+ """Log an activation message"""
+ LOGGER.info(message)
+ self.log_file.write(message + '\n')
+
+ def _dhcp_port_toggle_test(self):
+ if not self.host.connect_port(False):
+ self.log('disconnect port not enabled')
+ return
+ time.sleep(self.host.config.get("port_debounce_sec", 0) + 1)
+ self.host.connect_port(True)
+ self._ip_callback = self._next_test
+
+ def _multi_subnet_test(self):
+ if not self.test_dhcp_ranges:
+ self._next_test()
+ return
+ dhcp_range = self.test_dhcp_ranges.pop(0)
+ self.log('Testing dhcp range: ' + ",".join([str(arg) for arg in dhcp_range]))
+ self.host.gateway.change_dhcp_range(*dhcp_range)
+ self._ip_callback = self._multi_subnet_test if self.test_dhcp_ranges else self._next_test
+
+ def _ip_change_test(self):
+ self.host.gateway.request_new_ip(self.host.target_mac)
+ self._ip_callback = self._next_test
+
+ def _finalize(self, exception=None):
+ self.terminate()
+ self.callback(exception=exception)
+
+ def terminate(self):
+ """Terminate this set of tests"""
+ self.log('Module terminating')
+ self.log_file.close()
+ self.log_file = None
+
+ def ip_listener(self, target_ip):
+ """Respond to an IP notification event"""
+ self.log('ip notification %s' % target_ip)
+ if self._ip_callback:
+ self._ip_callback()
diff --git a/daq/network.py b/daq/network.py
index b622205926..255a6bd259 100644
--- a/daq/network.py
+++ b/daq/network.py
@@ -136,7 +136,7 @@ def _attach_sec_device_links(self):
def is_system_port(self, dpid, port):
"""Check if the dpid/port combo is the system trunk port"""
- return dpid == self.topology.PRI_DPID and port == self.topology.PRI_STACK_PORT
+ return dpid == self.topology.PRI_DPID and port == self.topology.PRI_TRUNK_PORT
def is_device_port(self, dpid, port):
"""Check if the dpid/port combo is for a valid device"""
diff --git a/daq/report.py b/daq/report.py
index 311c8fe89e..d3ee8e5046 100644
--- a/daq/report.py
+++ b/daq/report.py
@@ -16,7 +16,6 @@
import gcp
import logger
-
LOGGER = logger.get_logger('report')
class ResultType(Enum):
@@ -306,6 +305,7 @@ def _get_test_info(self, test_name):
return self._module_config.get('tests', {}).get(test_name, {})
def _write_repitems(self):
+ from host import get_test_config # Deferring import
for (test_name, result_dict) in self._repitems.items():
# To not write a module header if there is nothing to report
def writeln(line, test_name=test_name):
@@ -318,7 +318,8 @@ def writeln(line, test_name=test_name):
writeln(self._TEST_SUBHEADER % "Report")
self._append_file(result_dict[ResultType.REPORT_PATH])
if ResultType.MODULE_CONFIG in result_dict:
- config = result_dict[ResultType.MODULE_CONFIG].get("modules", {}).get(test_name)
+ module_configs = result_dict[ResultType.MODULE_CONFIG]
+ config = get_test_config(module_configs, test_name)
if config and len(config) > 0:
writeln(self._TEST_SUBHEADER % "Module Config")
table = MdTable(["Attribute", "Value"])
diff --git a/daq/runner.py b/daq/runner.py
index 631aa43526..5895045271 100644
--- a/daq/runner.py
+++ b/daq/runner.py
@@ -393,7 +393,7 @@ def _target_set_trigger(self, target_port):
# Stops all DHCP response initially
# Selectively enables dhcp response at ipaddr stage based on dhcp mode
- gateway.execute_script('change_dhcp_response_time', target_mac, -1)
+ gateway.stop_dhcp_response(target_mac)
gateway.attach_target(target_port, target)
try:
diff --git a/daq/topology.py b/daq/topology.py
index 2438b49012..2cda43ebe4 100644
--- a/daq/topology.py
+++ b/daq/topology.py
@@ -31,13 +31,14 @@ class FaucetTopology:
INCOMING_ACL_FORMAT = "dp_%s_incoming_acl"
PORTSET_ACL_FORMAT = "dp_%s_portset_%d_acl"
LOCAL_ACL_FORMAT = "dp_%s_local_acl"
- _DEFAULT_STACK_PORT_NAME = "stack_sec"
+ _DEFAULT_SEC_TRUNK_NAME = "trunk_sec"
_MIRROR_IFACE_FORMAT = "mirror-%d"
_MIRROR_PORT_BASE = 1000
_SWITCH_LOCAL_PORT = _MIRROR_PORT_BASE
_VLAN_BASE = 1000
PRI_DPID = 1
- PRI_STACK_PORT = 1
+ PRI_TRUNK_PORT = 1
+ PRI_TRUNK_NAME = 'trunk_pri'
_NO_VLAN = "0x0000/0x1000"
def __init__(self, config):
@@ -92,7 +93,7 @@ def get_sec_dpid(self):
return self.sec_dpid
def get_sec_port(self):
- """Return the secondary stacking port"""
+ """Return the secondary trunk port"""
return self.sec_port
def get_device_intfs(self):
@@ -173,20 +174,23 @@ def _update_port_vlan(self, port_no, port_set):
def _port_set_vlan(self, port_set=None):
return self._VLAN_BASE + (port_set if port_set else 0)
- def _make_pri_stack_interface(self):
+ def _make_pri_trunk_interface(self):
interface = {}
interface['acl_in'] = self.INCOMING_ACL_FORMAT % self.pri_name
- interface['stack'] = {'dp': self.sec_name, 'port': self.sec_port}
- interface['name'] = 'stack_pri'
+ interface['tagged_vlans'] = self._vlan_tags()
+ interface['name'] = self.PRI_TRUNK_NAME
return interface
- def _make_sec_stack_interface(self):
+ def _make_sec_trunk_interface(self):
interface = {}
interface['acl_in'] = self.INCOMING_ACL_FORMAT % self.sec_name
- interface['stack'] = {'dp': self.pri_name, 'port': self.PRI_STACK_PORT}
- interface['name'] = self.get_ext_intf() or self._DEFAULT_STACK_PORT_NAME
+ interface['tagged_vlans'] = self._vlan_tags()
+ interface['name'] = self.get_ext_intf() or self._DEFAULT_SEC_TRUNK_NAME
return interface
+ def _vlan_tags(self):
+ return list(range(self._VLAN_BASE, self._VLAN_BASE + self.sec_port))
+
def _make_default_acl_rules(self):
rules = []
if not self._append_acl_template(rules, 'raw'):
@@ -201,7 +205,7 @@ def _make_sec_port_interface(self, port_no):
def _make_pri_interfaces(self):
interfaces = {}
- interfaces[self.PRI_STACK_PORT] = self._make_pri_stack_interface()
+ interfaces[self.PRI_TRUNK_PORT] = self._make_pri_trunk_interface()
for port_set in range(1, self.sec_port):
for port in self._get_gw_ports(port_set):
interfaces[port] = self._make_gw_interface(port_set)
@@ -212,7 +216,7 @@ def _make_pri_interfaces(self):
def _make_sec_interfaces(self):
interfaces = {}
- interfaces[self.sec_port] = self._make_sec_stack_interface()
+ interfaces[self.sec_port] = self._make_sec_trunk_interface()
for port in range(1, self.sec_port):
interfaces[port] = self._make_sec_port_interface(port)
return interfaces
@@ -227,23 +231,24 @@ def _make_acl_include(self):
def _make_pri_topology(self):
pri_dp = {}
pri_dp['dp_id'] = self.PRI_DPID
- pri_dp['name'] = self.pri_name
- pri_dp['stack'] = {'priority':1}
pri_dp['interfaces'] = self._make_pri_interfaces()
return pri_dp
def _make_sec_topology(self):
sec_dp = {}
sec_dp['dp_id'] = self.sec_dpid
- sec_dp['name'] = self.sec_name
sec_dp['interfaces'] = self._make_sec_interfaces()
return sec_dp
+ def _has_sec_switch(self):
+ return self.sec_dpid and self.sec_port
+
def _make_base_network_topology(self):
assert self.pri, 'pri dataplane not configured'
dps = {}
dps['pri'] = self._make_pri_topology()
- dps['sec'] = self._make_sec_topology()
+ if self._has_sec_switch():
+ dps['sec'] = self._make_sec_topology()
topology = {}
topology['dps'] = dps
topology['vlans'] = self._make_vlan_description(10)
diff --git a/docker/include/bin/start_faux b/docker/include/bin/start_faux
index 94c9711ab3..708b699e80 100755
--- a/docker/include/bin/start_faux
+++ b/docker/include/bin/start_faux
@@ -126,6 +126,15 @@ if [ -n "${options[telnet]}" ]; then
(while true; do echo Telnet `hostname`; nc -nvlt -p 23 -e `which hostname`; done) &
fi
+if [ -n "${options[ssh]}" ]; then
+ echo Starting SSH server
+ /usr/local/sbin/sshd
+elif [ -n "${options[sshv1]}" ]; then
+ echo Starting SSHv1 server
+ echo 'Protocol 1' >> /usr/local/etc/sshd_config
+ /usr/local/sbin/sshd
+fi
+
if [ -n "${options[bacnet]}" ]; then
echo Starting bacnet loop device.
java -cp bacnetTests/build/libs/bacnet-1.0-SNAPSHOT-all.jar \
@@ -136,9 +145,21 @@ elif [ -n "${options[bacnetfail]}" ]; then
FauxDeviceEngine.EntryPoint $local_ip $broadcast_ip "Faux-Device-Fail.json" &
fi
-if [ -n "${options[ntp_client]}" ]; then
- echo Starting ntp client.
- java -jar NTPClient/build/libs/NTPClient-1.0-SNAPSHOT.jar "time.google.com" "123" "3" &
+# Queries the NTP server learnt from DHCP.
+if [ -n "${options[ntpv4]}" ]; then
+ (while date; do
+ dhcp_ntp=$(fgrep NTPSERVERS= /run/ntpdate.dhcp)
+ ntp_server=`echo $dhcp_ntp | cut -d "'" -f 2`
+ echo Transmitting NTP query to $ntp_server using NTPv4
+ ntpdate -q -o 4 $ntp_server
+ sleep 5
+ done) &
+elif [ -n "${options[ntpv3]}" ]; then
+ (while date; do
+ echo Transmitting NTP query to time.google.com using NTPv3
+ ntpdate -q -o 3 time.google.com
+ sleep 5
+ done) &
fi
# ntp_pass queries the NTP server learnt from DHCP. ntp_fail sends to time.google.com
@@ -155,7 +176,7 @@ if [ -n "${options[ntp_pass]}" -o -n "${options[ntp_fail]}" ]; then
fi
echo Transmitting NTP query to $ntp_server
ntpdate -q -p 1 $ntp_server
- sleep 10
+ sleep 5
done) &
fi
diff --git a/docker/include/bin/test_ping b/docker/include/bin/test_ping
index 99bc957d83..b66393a079 100755
--- a/docker/include/bin/test_ping
+++ b/docker/include/bin/test_ping
@@ -88,8 +88,8 @@ echo Done with basic connectivity tests | tee -a $MONO_LOG
echo Checking startup NTP
ntp_target=${TARGET_IP%.*}.2
-ntp_request=`tcpdump -env -c 1 -r /scans/startup.pcap dst port 123 | wc -l`
-ntp_proper=`tcpdump -env -c 1 -r /scans/startup.pcap dst port 123 and dst host $ntp_target | wc -l`
+ntp_request=`tcpdump -env -c 1 -r /scans/monitor.pcap dst port 123 | wc -l`
+ntp_proper=`tcpdump -env -c 1 -r /scans/monitor.pcap dst port 123 and dst host $ntp_target | wc -l`
if [ "$ntp_request" == 0 ]; then
ntp_result=skip
ntp_summary="No NTP traffic detected"
diff --git a/docker/include/networking_scripts/change_dhcp_range b/docker/include/networking_scripts/change_dhcp_range
new file mode 100755
index 0000000000..0c45cd6bae
--- /dev/null
+++ b/docker/include/networking_scripts/change_dhcp_range
@@ -0,0 +1,24 @@
+#!/bin/bash -e
+#
+# Dynamically change DHCP lease range, requires killing and restarting
+# dnsmasq as per documentation (SIGHUP does not reload configuration file).
+LOCAL_IF=${LOCAL_IF:-$HOSTNAME-eth0}
+
+range_start=$1
+range_end=$2
+prefix_len=$3
+if [ -z $range_start -o -z $range_end -o -z $prefix_len ]; then
+ echo "Usage: change_dhcp_range range_start range_end prefix_len"
+ exit 1
+fi
+while [ $(cat /etc/dnsmasq.conf | egrep "^dhcp-range=" | wc -l) == 0 ]; do
+ sleep 1
+done
+ip addr add $range_start/$prefix_len dev $LOCAL_IF || true
+original=$(cat /etc/dnsmasq.conf | egrep "^dhcp-range=" | head -1)
+lease=$(echo $original | cut -d',' -f 3)
+if [ -n "$lease" ]; then
+ lease=",$lease"
+fi
+new="dhcp-range=$range_start,$range_end$lease"
+flock /etc/dnsmasq.conf sed -i s/$original/$new/ /etc/dnsmasq.conf
\ No newline at end of file
diff --git a/docker/include/networking_scripts/change_lease_time b/docker/include/networking_scripts/change_lease_time
index 306e985604..0cb8986c8a 100755
--- a/docker/include/networking_scripts/change_lease_time
+++ b/docker/include/networking_scripts/change_lease_time
@@ -7,10 +7,10 @@ if [ -z $lease ]; then
echo "Lease time not defined."
exit 1
fi
-while [ $(cat /etc/dnsmasq.conf | grep dhcp-range=10.20 | wc -l) == 0 ]; do
+while [ $(cat /etc/dnsmasq.conf | grep "^dhcp-range=" | wc -l) == 0 ]; do
sleep 1
done
-original=$(cat /etc/dnsmasq.conf | grep dhcp-range=10.20 | head -1)
+original=$(cat /etc/dnsmasq.conf | grep "^dhcp-range=" | head -1)
new="$(echo $original | cut -d',' -f 1,2),$lease"
flock /etc/dnsmasq.conf sed -i s/$original/$new/ /etc/dnsmasq.conf
diff --git a/docker/modules/Dockerfile.faux1 b/docker/modules/Dockerfile.faux1
index 7c8e9c0be0..69f7c0edef 100644
--- a/docker/modules/Dockerfile.faux1
+++ b/docker/modules/Dockerfile.faux1
@@ -18,13 +18,23 @@ RUN bin/retry_cmd git clone https://github.com/grafnu/bacnet4j.git --single-bran
COPY pubber/ pubber/
RUN pubber/bin/build
+# Separate stage to build older version of SSH and SSL
+FROM daqf/aardvark:latest as ssh_build
+
+RUN $AG update && $AG install wget make build-essential gcc libz-dev ca-certificates
+
+# Build SSH, OpenSSL from source and configure
+
+COPY subset/security/sshfaux/*.sh ./
+RUN sh ssh_build.sh
+
FROM daqf/aardvark:latest
# Run this separately so it can be shared with other builds.
RUN $AG update && $AG install openjdk-8-jre
RUN $AG update && $AG install openjdk-8-jdk git
RUN $AG update && $AG install isc-dhcp-client ethtool network-manager netcat curl\
- python ifupdown openssl ssh nano apache2-utils ntpdate
+ python ifupdown openssl nano apache2-utils ntpdate
# Additional OS dependencies
RUN $AG update && $AG install -y telnetd && $AG install xinetd nginx
@@ -52,17 +62,19 @@ COPY subset/bacnet/bacnetTests/src/main/resources/Faux*.json tmp/
COPY --from=java_build /root/bacnet4j/bacnet4j-1.0-SNAPSHOT-all.jar bacnetTests/libs/
RUN cd bacnetTests && ./gradlew build
-# SSH dependency
-COPY subset/security/ssh_additions.config ssh_additions.config
-RUN cat ssh_additions.config >> /etc/ssh/sshd_config
-
# HTTP/HTTPS dependency
COPY subset/security/nginxpass.conf /root/nginx/
COPY subset/security/nginxfail.conf /root/nginx/
COPY subset/security/nginx-site /var/www/nginx-site
-# SSH login fix. Otherwise user is kicked off after login
-RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
+COPY --from=ssh_build /usr/local/openssl/* /usr/local/openssl/
+COPY --from=ssh_build /usr/local/sbin/* /usr/local/sbin/
+COPY --from=ssh_build /usr/local/bin/* /usr/local/bin/
+COPY --from=ssh_build /usr/local/etc/* /usr/local/etc/
+
+COPY subset/security/sshfaux/ssh_privsep.sh ssh_privsep.sh
+RUN sh ssh_privsep.sh
+RUN /usr/local/bin/ssh-keygen -A
# Weird workaround for problem running tcdump in a privlidged container.
RUN mv /usr/sbin/tcpdump /usr/bin/tcpdump
diff --git a/docs/add_test.md b/docs/add_test.md
index f279359570..e460137584 100644
--- a/docs/add_test.md
+++ b/docs/add_test.md
@@ -34,7 +34,7 @@ A setup for the `pass` test, as an example, would be configured as follows
* `echo host_tests=local/local_tests.conf >> local/system.conf` -- Set tests configuration.
This, of course, only works for local development when using the `local_tests.conf` config. To
-formalize a test and include it in the overal system build it should be included in
+formalize a test and include it in the overall system build it should be included in
`config/modules/all.conf`.
## Component Build
diff --git a/docs/changelog.md b/docs/changelog.md
index a67a7cfd32..e3a796f4a1 100644
--- a/docs/changelog.md
+++ b/docs/changelog.md
@@ -1,4 +1,18 @@
# Changelog
+* 1.7.0
+ * Add DAQ version to origin summary (#522)
+ * Add check for git version tag in Travis (#519)
+ * Minor UDMI updates for pubber keygen
+ * Update Minimum Send Test (#498)
+ * Universal Switch Interface (USI) (#496)
+* 1.6.1
+ * fix image pull in cmd/build (#503)
+* 1.6.0
+ * cloud test setup documentation (#495)
+ * Baseline for NTP tests (#494)
+ * Baseline for DNS test (#492)
+ * Add manual test summary to test report (#481)
+ * UDMI logentry schema update (#391)
* 1.5.1
* Fix for local-port-as-string issue (#477)
* 1.5.0
diff --git a/docs/cloud_tests.md b/docs/cloud_tests.md
new file mode 100644
index 0000000000..6ed8a9efed
--- /dev/null
+++ b/docs/cloud_tests.md
@@ -0,0 +1,133 @@
+# Cloud Connection Testing
+
+A number of additional setup steps are required for enabling testing against "smart devices"
+that communicate with the cloud. The tests themselves are part of the `subset/cloud/test_udmi`
+module included in the standard DAQ distro. The same basic device-to-cloud validation test
+pipeline can be done manually and automatically (through DAQ); it's instructive to fully
+understand the manual test pipeline before engaging with the automated setup.
+
+## Manual Test Pipeline
+
+The overall device-to-cloud pipeline looks something like the following:
+
+* Device sends data to the cloud. There's two kinds of devices:
+ * A faux _reference design_ device called [pubber](pubber.md), which is a completely contained
+ software device.
+ * An actual physical device. The setup and configuration of that device will be manufacturer
+ dependent and so is out of scope for this (DAQ) documentation.
+* A configured GCP IoT Core project, registry, and device entry. The
+[GCP docs for IoT Core](https://cloud.google.com/iot/docs/how-tos/devices) describe the basics. The
+key part is the _authentication key_ (hahaha) that needs to be setup between the local device and
+cloud device entry.
+* The IoT Core registry is configured with a _PubSub topic_ (not to be confused with an _MQTT topic_),
+that provides the bridge between incoming data and consumers of that data. See the GCP documentation
+on PubSub for more details.
+* (optional) The `gcloud` command line can be used to validate that data is being sent from the
+device to the cloud. Something like
+`gcloud pubsub subscriptions pull --auto-ack projects/{project}/subscriptions/{sub_id}`.
+(Complete documentation for how to use `gcloud` commands is out of scope of this documentation.)
+* The [validator tool](validator.md) is what programmatically validates a device data stream, and
+is what is ultimately used by `test_udmi` to validate device-cloud communication.
+
+## Base Local Test Setup
+
+* The `udmi` module needs to be enabled in build. When running `cmd/build` there should be a line
+like `subset/cloud/Dockerfile.test_udmi` in the startup logs.
+This is enabled through the `host_tests` config parameter,
+which can be set to `config/modules/all.conf` if necessary. On startup, there should be a log
+message that includes `udmi`:
+```
+Jun 22 08:32:52 runner INFO Configured with tests pass, fail, ping, bacnet, mudgee, nmap, discover, switch, macoui, bacext, tls, password, udmi, manual
+```
+* A testing gcp service account `gcp_cred` needs to be setup as described in
+[service account setup instructions](service.md).
+* The system's default `module_config` needs to enable the `udmi` test, e.g. as per
+`resources/setups/baseline/module_config.json`. This can be validated by (runtime) checking
+`inst/run-port-01/nodes/udmi01/tmp/module_config.json` to see if it has something like the following:
+```
+ "udmi": {
+ "enabled": true
+ }
+```
+* `site_path` config needs to point to a site definition directory, or defaults to `local/site`.
+This contains all the site-specific information about devices needed for testing.
+* `{site_path}/mac_addrs/{mac_addr}/module_config.json` needs to have a `device_id` defined, e.g.
+as in `resources/test_site/mac_addrs/3c5ab41e8f0b/module_config.json`.
+* The GCP IoT Core setup needs to have a proper registry and device configured. This can either
+be done manually or using the [registrar tool](registrar.md) tool.
+
+## Integration Testing
+
+If developing cloud-tests, then the CI build system also needs to have a service account configured
+pointing at a suitable GCP project. To run cloud-based tests, setup the Travis `GCP_BASE64_CRED`
+env variable with a `base64` encoded service account key for your project. It's recommended to
+use a dedicated key with a nice name like `daq-travis`, but not required. Encode the key value
+as per below, and cut/paste the resulting string into a
+[Travis environment variable](https://docs.travis-ci.com/user/environment-variables/#defining-variables-in-repository-settings)
+for a `GCP_BASE64_CRED` variable. Note the `-w 0` option is required for proper parsing/formatting,
+as there can't be any newlines in the copied string.
+
+
+$ base64 -w 0 local/gcp_service_account.json
+ewoICJ1eXBlIjogInNlcnZpY2VfYWNjb3VudCIsCiAgInByb2plY3RfaWQiOiAiYm9zLWRhcS10ZXN0aW5nIiwKICAicHJpd
+…
+iOiAiaHR0cHM6Ly93LWRhcS10ZXN0aW5nLmlhbS5nc2VydmljZWFjY291bnQuY29tIgp9Cg==
+
+
+### Travis CI Testing
+
+* Run the [registrar tool](registrar.md) to properly configure the cloud project.
+* Add the `gcp_topic` config to `local/system.conf` as described in this doc.
+* Configure test subsystem with proper cloud endpoint in `{test_site}/cloud_iot_config.json`.
+* Configure the DUT with the proper cloud device credentials (device specific). For _faux_ devices, this means copying
+the associated `rsa_private.pkcs8` file to something like `inst/faux/daq-faux-2/local/` (exact path depends on which faux).
+* Test with `bin/registrar`, `pubber/bin/run`, and `bin/validate` manually, before integrated testing through DAQ.
+
+### Is my Travis set up correctly?
+
+If Travis is set up correctly, you should see messages at the beginning of the log file:
+```
+Setting environment variables from repository settings
+$ export DOCKER_USERNAME=[secure]
+$ export DOCKER_PASSWORD=[secure]
+$ export GCP_BASE64_CRED=[secure]
+```
+
+Further down there would be more details about the cred itself:
+```
+Running test script testing/test_aux.sh
+Writing test results to inst/test_aux.out and inst/test_aux.gcp
+Decoding GCP_BASE64_CRED to inst/config/gcp_service_account.json
+base64 wc: 1 1 3097
+GCP service account is "daq-travis@daq-testing.iam.gserviceaccount.com"
+```
+
+If the `3097` character count is wildly off, then likely something went wrong with the newlines.
+
+### Travis Build For "External" Pull Requests
+
+Travis will not use encrypted environment variables when testing against pull requests
+from foreign github repositories, even if you've forked from another repository that you
+have full control of via Github. Travis authorization != Github authorization, even if
+you sign into Travis using Github! This is as it should be, because security. See the following
+for more info:
+
+- https://docs.travis-ci.com/user/environment-variables/#defining-variables-in-repository-settings
+- https://docs.travis-ci.com/user/pull-requests/#pull-requests-and-security-restrictions
+
+If your test is failing from a PR, you'll see something like in a similar log location:
+
+```
+Encrypted environment variables have been removed for security reasons.
+See https://docs.travis-ci.com/user/pull-requests/#pull-requests-and-security-restrictions
+Setting environment variables from .travis.yml
+$ export DOCKER_STARTUP_TIMEOUT_MS=60000
+$ export DAQ_TEST=aux
+```
+
+### Other Travis Caveats
+
+Take note the URL in your browser's address bar when running Travis. You might be on either
+travis-ci.com or travis-ci.org. Any particular setup
+may end up across both sites for undetermined reasons. Please consult with your browser's
+exact URL for more clarity.
diff --git a/docs/device_report.md b/docs/device_report.md
index 2ab3ab3320..4a5be26839 100644
--- a/docs/device_report.md
+++ b/docs/device_report.md
@@ -56,7 +56,7 @@ Overall device result FAIL
|---|---|---|---|---|
|Required|1|0|0|0|
|Recommended|2|0|0|0|
-|Other|1|2|22|2|
+|Other|2|2|22|2|
|Result|Test|Category|Expectation|Notes|
|---|---|---|---|---|
@@ -67,6 +67,7 @@ Overall device result FAIL
|skip|cloud.udmi.state|Other|Other|No device id|
|skip|cloud.udmi.system|Other|Other|No device id|
|fail|connection.mac_oui|Other|Other|Manufacturer prefix not found!|
+|pass|connection.network.ntp_support|Other|Other|Using NTPv4.|
|skip|connection.port_duplex|Other|Other|No local IP has been set, check system config|
|skip|connection.port_link|Other|Other|No local IP has been set, check system config|
|skip|connection.port_speed|Other|Other|No local IP has been set, check system config|
@@ -92,15 +93,6 @@ Overall device result FAIL
|gone|unknown.fake.monkey|Other|Other||
-## Module ipaddr
-
-
-#### Module Config
-
-|Attribute|Value|
-|---|---|
-|timeout_sec|300|
-
## Module pass
@@ -567,5 +559,25 @@ RESULT pass manual.test.travis Manual test - for testing
|---|---|
|enabled|True|
+## Module ntp
+
+
+#### Report
+
+```
+--------------------
+connection.network.ntp_support
+--------------------
+Device supports NTP version 4.
+--------------------
+RESULT pass connection.network.ntp_support Using NTPv4.
+```
+
+#### Module Config
+
+|Attribute|Value|
+|---|---|
+|enabled|True|
+
## Report complete
diff --git a/docs/integration_testing.md b/docs/integration_testing.md
deleted file mode 100644
index 1c6435a9b0..0000000000
--- a/docs/integration_testing.md
+++ /dev/null
@@ -1,83 +0,0 @@
-# Integration Testing
-
-DAQ currently uses Travis CI for integration testing: https://travis-ci.org/
-
-## Configuration
-
-The `test_udmi` test module uses the Registrar and Validator to check that a device is
-properly communicating through Cloud IoT, automated through DAQ.
-
-### GCP Credential
-
-To run cloud-based tests, setup the Travis `GCP_BASE64_CRED` env variable with a `base64` encoded
-service account key for your project. It's recommended to use a dedicated key with a nice name
-like `daq-travis`, but not required. Encode the key value as per below, and cut/paste the
-resulting string into a
-[Travis environment variable](https://docs.travis-ci.com/user/environment-variables/#defining-variables-in-repository-settings)
-for a `GCP_BASE64_CRED` varaible. Note the `-w 0` option is required for proper parsing/formatting,
-as there can't be any newlines in the copied string.
-
-
-$ base64 -w 0 local/gcp_service_account.json
-ewoICJ1eXBlIjogInNlcnZpY2VfYWNjb3VudCIsCiAgInByb2plY3RfaWQiOiAiYm9zLWRhcS10ZXN0aW5nIiwKICAicHJpd
-…
-iOiAiaHR0cHM6Ly93LWRhcS10ZXN0aW5nLmlhbS5nc2VydmljZWFjY291bnQuY29tIgp9Cg==
-
-
-## Travis CI Testing
-
-* Run the [registrar tool](registrar.md) to properly configure the cloud project.
-* `gcp_topic` config to `local/system.conf` as described in this doc.
-* Configure test subsystem with proper cloud endpoint in `{test_site}/cloud_iot_config.json`.
-* Configure the DUT with the proper cloud device credentials (device specific). For _faux_ devices, this means copying
-the assocatied `rsa_private.pkcs8` file to someting like `inst/faux/daq-faux-2/local/` (exact path depends on which faux).
-* Test with `bin/registrar`, `pubber/bin/run`, and `bin/validate` manually, before integrated testing through DAQ.
-
-### Is my Travis set up correctly?
-
-If Travis is set up correctly, you should see messages at the beginning of the log file:
-```
-Setting environment variables from repository settings
-$ export DOCKER_USERNAME=[secure]
-$ export DOCKER_PASSWORD=[secure]
-$ export GCP_BASE64_CRED=[secure]
-```
-
-Further down there would be more details about the cred itself:
-```
-Running test script testing/test_aux.sh
-Writing test results to inst/test_aux.out and inst/test_aux.gcp
-Decoding GCP_BASE64_CRED to inst/config/gcp_service_account.json
-base64 wc: 1 1 3097
-GCP service account is "daq-travis@daq-testing.iam.gserviceaccount.com"
-```
-
-If the `3097` character count is wildly off, then likely something went wrong with the newlines.
-
-### Travis Build For "External" Pull Requests
-
-Travis will not use encrypted environment variables when testing against pull requests
-from foreign github repositories, even if you've forked from another repository that you
-have full control of via Github. Travis authorization != Github authorization, even if
-you sign into Travis using Github! This is as it should be b/c security. see the following
-for more info:
-
-- https://docs.travis-ci.com/user/environment-variables/#defining-variables-in-repository-settings
-- https://docs.travis-ci.com/user/pull-requests/#pull-requests-and-security-restrictions
-
-If your test is failing from a PR, you'll see something like in a similar log location:
-
-```
-Encrypted environment variables have been removed for security reasons.
-See https://docs.travis-ci.com/user/pull-requests/#pull-requests-and-security-restrictions
-Setting environment variables from .travis.yml
-$ export DOCKER_STARTUP_TIMEOUT_MS=60000
-$ export DAQ_TEST=aux
-```
-
-### Other Travis Caveats
-
-Take note the URL in your browser's address bar when running Travis. You might be on either
-travis-ci.com or travis-ci.org. Any particular setup
-may end up across both sites for undertermined reasons. Please consult with your browser's
-exact URL for more clarity.
diff --git a/docs/orchestration.md b/docs/orchestration.md
index 4804ec356f..703c5235a0 100644
--- a/docs/orchestration.md
+++ b/docs/orchestration.md
@@ -11,7 +11,7 @@ to change.
## Data Rouces
-The overal orchestration capability relies on several simple data sources:
+The overall orchestration capability relies on several simple data sources:
1. [Overall network topology](topologies.md), which indicates how the network hardware is configured.
2. [Device MUD files](../mud_files), which provide an
[IETF Standard MUD descriptor](https://datatracker.ietf.org/doc/draft-ietf-opsawg-mud/) that describes
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
index 4a8fa1ba09..05aca2c9d6 100644
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -9,7 +9,7 @@ mailing list, and use it as the primary source of troubleshooting.
email somebody directly, but will likely result in a slower response time.
* The `inst/cmdrun.log` file contains a copy of the console output from DAQ.
* This file should be attached to communications about resolving DAQ issues.
- * It's not necessary to include any assocaited `local/system.yaml` file, since the
+ * It's not necessary to include any associated `local/system.yaml` file, since the
contents of that are already included.
* Make sure everything is running properly using the internal simulation setup
before tackling anything to do with external switches or physical devices.
@@ -29,12 +29,17 @@ a summary of all test results.
* The determination of _PASS_ vs. _FAIL_ is one of policy, not a technical
consideration. If the question is "Is it OK if this tests fails or not?" then
you need to contact whomever is responsible for policy, not DAQ-proper.
- * The reports are _optionally_ available trough the _optionally_ configured
+ * The reports are _optionally_ available through the _optionally_ configured
GCP instance, but that's only relevant after the basics are working.
-* Capturing a complete zip of the `inst/` directory should encompass all the
-state neesary to diagnose/debug problems, so simply captuing that and sending
-it along would be sufficient in most cases. Be wary of file size, as `inst/`
-can collect cruft over time and occasionally need to be cleaned.
+* Running `bin/techsupport.sh` will create a zipped techsupport file that
+ contains all configuration, packet captures and runtime logs of a run.
+ Sending that file is sufficient in most cases. Be wary of file
+ size, as `inst/` might have large pcap files or older files that can be
+ trimmed to get more manageable file sizes for email attachments.
+* Unless you are developing for DAQ and want the latest code, ensure that you
+ are on the latest stable software version tracked by the git tag `release_stable`.
+* If a test run blocks or errors out, try running `bin/troubleshoot` to detect
+ some common misconfiguration and setup related issues.
## Test-Specific
@@ -82,4 +87,4 @@ directory.
* Filter results for the device's MAC address with something like:
tcpdump -en -r testing.pacp ether host de:vi:ce:ma:ca:dr.
* There is no one-size-fits-all guidance here, because what is expected is
- extremeley test-specific.
+ extremely test-specific.
diff --git a/etc/docker_images.txt b/etc/docker_images.txt
index 193e733424..78ef0f7a7d 100644
--- a/etc/docker_images.txt
+++ b/etc/docker_images.txt
@@ -1,23 +1,24 @@
-daqf/aardvark 34718b2f3fd5
-daqf/default 3ac95db36ee4
-daqf/faucet 45c13344a8ed
-daqf/faux1 ecff07f12534
-daqf/faux2 39914ae11741
-daqf/gauge 1431053cf25e
-daqf/networking af56b0732100
-daqf/switch 67954aca8dce
-daqf/test_bacext 363b6d476ac8
-daqf/test_bacnet 073a0eb5529f
-daqf/test_brute 700d986d5e83
-daqf/test_discover ad34b17b41e6
-daqf/test_fail c9a7e6b43bd0
-daqf/test_hold cb120980c658
-daqf/test_macoui a828288c855b
-daqf/test_mudgee d4ed15ef1dfc
-daqf/test_nmap 78aa5def41e5
-daqf/test_pass 74167ef0df55
-daqf/test_password 471bd1290918
-daqf/test_ping 5618e0243643
-daqf/test_switch 47585fc0876e
-daqf/test_tls 9c5f28b74fed
-daqf/test_udmi fc13d4c80b0d
+daqf/aardvark 13e07616906a
+daqf/default 8547decf4b0c
+daqf/faucet 0bd65761a824
+daqf/faux1 500fc556e362
+daqf/faux2 65be2e8aaff5
+daqf/gauge 399cf3f0cf26
+daqf/networking e7d0b7cea324
+daqf/switch 0a7b905f10fa
+daqf/test_bacext 765b3fd4f471
+daqf/test_bacnet 1cdac0876850
+daqf/test_brute 9d046780449f
+daqf/test_discover 6bb39aebc6d9
+daqf/test_fail 21b8d383d676
+daqf/test_hold 2c2dbda2fb23
+daqf/test_macoui 890bc044e327
+daqf/test_manual 156a1947c7f4
+daqf/test_mudgee 44a4ad7a9615
+daqf/test_nmap 6e97b5498219
+daqf/test_pass 95e9680cef60
+daqf/test_password 1bc14db7767e
+daqf/test_ping 45e3f58e30a2
+daqf/test_switch 57cf3951b2e3
+daqf/test_tls f93b7fec95a4
+daqf/test_udmi 771e5969564d
diff --git a/etc/docker_images.ver b/etc/docker_images.ver
index 26ca594609..bd8bf882d0 100644
--- a/etc/docker_images.ver
+++ b/etc/docker_images.ver
@@ -1 +1 @@
-1.5.1
+1.7.0
diff --git a/firebase/functions/index.js b/firebase/functions/index.js
index f17a3d2c6c..12b6a11875 100644
--- a/firebase/functions/index.js
+++ b/firebase/functions/index.js
@@ -123,7 +123,7 @@ function handleTestResult(origin, siteName, message) {
const deviceDoc = originDoc.collection('device').doc(message.device_id);
const updates = [
- originDoc.set({ 'updated': timestamp }),
+ originDoc.set({ 'updated': timestamp }, { merge: true }),
siteDoc.set({ 'updated': timestamp }),
portDoc.set({ 'updated': timestamp }),
deviceDoc.set({ 'updated': timestamp })
@@ -193,17 +193,22 @@ function handleTestResult(origin, siteName, message) {
function handleHeartbeat(origin, message) {
const timestamp = new Date().toJSON();
const originDoc = db.collection('origin').doc(origin);
- console.log('heartbeat', timestamp, origin)
+ console.log('heartbeat', timestamp, origin, message)
const heartbeatDoc = originDoc.collection('runner').doc('heartbeat');
return Promise.all([
- originDoc.set({ 'updated': timestamp }),
+ originDoc.set({
+ 'updated': timestamp,
+ 'version': message.version
+ }),
heartbeatDoc.get().then((result) => {
const current = result.data();
- if (!current || !current.message || current.message.timestamp < message.timestamp)
+ const defined = current && current.message && current.message.timestamp;
+ if (!defined || current.message.timestamp < message.timestamp) {
return heartbeatDoc.set({
'updated': timestamp,
message
});
+ }
})
]);
}
diff --git a/firebase/public/index.html b/firebase/public/index.html
index d9ce546c59..c4928cd090 100644
--- a/firebase/public/index.html
+++ b/firebase/public/index.html
@@ -31,14 +31,14 @@
Filters:
-
-
-
-
+
+
+
+
@@ -52,10 +52,10 @@
Filters:
-
Sites
-
Origins
+
Sites
+
Users
@@ -84,4 +84,4 @@ Users
if (typeof daq_deploy_version !== 'undefined') {
document.getElementById('deploy-version').innerHTML = daq_deploy_version;
}
-
\ No newline at end of file
+
diff --git a/firebase/public/main.js b/firebase/public/main.js
index 09f4237a5e..a6eeee087f 100644
--- a/firebase/public/main.js
+++ b/firebase/public/main.js
@@ -8,11 +8,6 @@ const display_columns = [];
const display_rows = [];
const row_timestamps = {};
-const data_state = {};
-
-let last_result_time_sec = 0;
-let heartbeatTimestamp = 0;
-
const origin_id = getQueryParam('origin');
const site_name = getQueryParam('site');
const port_id = getQueryParam('port');
@@ -21,8 +16,13 @@ const device_id = getQueryParam('device');
const run_id = getQueryParam('runid');
const from = getQueryParam('from');
const to = getQueryParam('to');
+
+const data_state = {};
+let last_result_time_sec = 0;
+let heartbeatTimestamp = 0;
var db;
-var activePorts = [];
+var activePorts = new Set();
+
document.addEventListener('DOMContentLoaded', () => {
db = firebase.firestore();
const settings = {
@@ -289,7 +289,7 @@ function watcherAdd(ref, collection, limit, handler) {
}, (e) => console.error(e));
}
-function listSites(db) {
+function listSites() {
const linkGroup = document.querySelector('#listings .sites');
db.collection('site').get().then((snapshot) => {
snapshot.forEach((site_doc) => {
@@ -303,21 +303,31 @@ function listSites(db) {
}).catch((e) => statusUpdate('registry list error', e));
}
-function listOrigins(db) {
- const linkGroup = document.querySelector('#listings .origins');
+function addOrigin(originId) {
+ db.collection('origin').doc(originId).get().then((result) => {
+ const linkGroup = document.querySelector('#listings .origins');
+ const originLink = document.createElement('a');
+ originLink.setAttribute('href', '/?origin=' + originId);
+ originLink.innerHTML = originId;
+ linkGroup.appendChild(originLink);
+ const originInfo = document.createElement('span');
+ const version = result.data() && result.data().version;
+ const updated = result.data() && result.data().updated;
+ originInfo.innerHTML = ` ${version}, ${updated}`;
+ linkGroup.appendChild(originInfo);
+ linkGroup.appendChild(document.createElement('p'));
+ });
+}
+
+function listOrigins() {
db.collection('origin').get().then((snapshot) => {
snapshot.forEach((originDoc) => {
- const origin = originDoc.id;
- const originLink = document.createElement('a');
- originLink.setAttribute('href', '/?origin=' + origin);
- originLink.innerHTML = origin;
- linkGroup.appendChild(originLink);
- linkGroup.appendChild(document.createElement('p'));
+ addOrigin(originDoc.id);
});
}).catch((e) => statusUpdate('origin list error', e));
}
-function listUsers(db) {
+function listUsers() {
const link_group = document.querySelector('#listings .users');
db.collection('users').get().then((snapshot) => {
snapshot.forEach((user_doc) => {
@@ -354,9 +364,9 @@ function dashboardSetup() {
triggerOrigin(db, origin_id);
} else {
document.getElementById('listings').classList.add('active');
- listSites(db);
- listOrigins(db);
- listUsers(db);
+ listOrigins();
+ listSites();
+ listUsers();
}
return origin_id;
diff --git a/firebase/public/protos.hash b/firebase/public/protos.hash
index bab39e76c2..786633c8a9 100644
--- a/firebase/public/protos.hash
+++ b/firebase/public/protos.hash
@@ -1 +1 @@
-b7a56a30dafe26576d6bdef00dfb57dc07a016ac proto/system_config.proto
+b335b4bd73bb5242e822a9b72cf4de6bd010cea3 proto/system_config.proto
diff --git a/firebase/public/protos.html b/firebase/public/protos.html
index d24bed4958..13290969b0 100644
--- a/firebase/public/protos.html
+++ b/firebase/public/protos.html
@@ -198,6 +198,10 @@ Table of Contents
MSwitchSetup
+
+ MUSISetup
+
+
@@ -478,6 +482,13 @@ DaqConfig
Set time between port disconnect and host tests shutdown |
+
+ | usi_setup |
+ USISetup |
+ |
+ USI url |
+
+
@@ -613,7 +624,14 @@ SwitchSetup
lo_port |
int32 |
|
- Local port of open flow controller |
+ Local port of DAQ OpenFlow controller |
+
+
+
+ | alt_port |
+ int32 |
+ |
+ Local port for an alternate OpenFlow controller |
@@ -679,6 +697,30 @@ SwitchSetup
+ USISetup
+ USI paramters
+
+
+
+
+ | Field | Type | Label | Description |
+
+
+
+
+ | url |
+ string |
+ |
+ |
+
+
+
+
+
+
+
+
+
diff --git a/libs/proto/system_config_pb2.py b/libs/proto/system_config_pb2.py
index e4746e687f..d82945d1c4 100644
--- a/libs/proto/system_config_pb2.py
+++ b/libs/proto/system_config_pb2.py
@@ -18,7 +18,7 @@
package='',
syntax='proto3',
serialized_options=None,
- serialized_pb=b'\n\x1d\x64\x61q/proto/system_config.proto\"\xfb\x07\n\tDaqConfig\x12\x18\n\x10site_description\x18\x01 \x01(\t\x12\x18\n\x10monitor_scan_sec\x18\x02 \x01(\x05\x12\x1b\n\x13\x64\x65\x66\x61ult_timeout_sec\x18\x03 \x01(\x05\x12\x12\n\nsettle_sec\x18& \x01(\x05\x12\x11\n\tbase_conf\x18\x04 \x01(\t\x12\x11\n\tsite_path\x18\x05 \x01(\t\x12\x1f\n\x17initial_dhcp_lease_time\x18\x06 \x01(\t\x12\x17\n\x0f\x64hcp_lease_time\x18\x07 \x01(\t\x12\x19\n\x11\x64hcp_response_sec\x18\' \x01(\x05\x12\x1e\n\x16long_dhcp_response_sec\x18\x08 \x01(\x05\x12\"\n\x0cswitch_setup\x18\t \x01(\x0b\x32\x0c.SwitchSetup\x12\x12\n\nhost_tests\x18\x10 \x01(\t\x12\x13\n\x0b\x62uild_tests\x18$ \x01(\x08\x12\x11\n\trun_limit\x18\x11 \x01(\x05\x12\x11\n\tfail_mode\x18\x12 \x01(\x08\x12\x13\n\x0bsingle_shot\x18\" \x01(\x08\x12\x15\n\rresult_linger\x18\x13 \x01(\x08\x12\x0f\n\x07no_test\x18\x14 \x01(\x08\x12\x11\n\tkeep_hold\x18( \x01(\x08\x12\x14\n\x0c\x64\x61q_loglevel\x18\x15 \x01(\t\x12\x18\n\x10mininet_loglevel\x18\x16 \x01(\t\x12\x13\n\x0b\x66inish_hook\x18# \x01(\t\x12\x10\n\x08gcp_cred\x18\x17 \x01(\t\x12\x11\n\tgcp_topic\x18\x18 \x01(\t\x12\x13\n\x0bschema_path\x18\x19 \x01(\t\x12\x11\n\tmud_files\x18\x1a \x01(\t\x12\x14\n\x0c\x64\x65vice_specs\x18\x1b \x01(\t\x12\x13\n\x0btest_config\x18\x1c \x01(\t\x12\x19\n\x11port_debounce_sec\x18\x1d \x01(\x05\x12\x11\n\tfail_hook\x18\x1e \x01(\t\x12\x17\n\x0f\x64\x65vice_template\x18\x1f \x01(\t\x12\x14\n\x0csite_reports\x18 \x01(\t\x12\x1f\n\x17run_data_retention_days\x18! 
\x01(\x02\x12.\n\ninterfaces\x18% \x03(\x0b\x32\x1a.DaqConfig.InterfacesEntry\x12/\n\x0b\x66\x61il_module\x18/ \x03(\x0b\x32\x1a.DaqConfig.FailModuleEntry\x12\x1d\n\x15port_flap_timeout_sec\x18\x30 \x01(\x05\x1a=\n\x0fInterfacesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.Interface:\x02\x38\x01\x1a\x31\n\x0f\x46\x61ilModuleEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xe2\x01\n\x0bSwitchSetup\x12\x11\n\tctrl_intf\x18\t \x01(\t\x12\x0f\n\x07ip_addr\x18\x0b \x01(\t\x12\x13\n\x0buplink_port\x18\r \x01(\x05\x12\x0f\n\x07lo_port\x18\x0e \x01(\x05\x12\x0f\n\x07lo_addr\x18\x0f \x01(\t\x12\x11\n\tmods_addr\x18\x10 \x01(\t\x12\x0f\n\x07of_dpid\x18) \x01(\t\x12\x11\n\tdata_intf\x18* \x01(\t\x12\x0e\n\x06\x65xt_br\x18+ \x01(\t\x12\r\n\x05model\x18, \x01(\t\x12\x10\n\x08username\x18- \x01(\t\x12\x10\n\x08password\x18. \x01(\t\"\'\n\tInterface\x12\x0c\n\x04opts\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\x62\x06proto3'
+ serialized_pb=b'\n\x1d\x64\x61q/proto/system_config.proto\"\x99\x08\n\tDaqConfig\x12\x18\n\x10site_description\x18\x01 \x01(\t\x12\x18\n\x10monitor_scan_sec\x18\x02 \x01(\x05\x12\x1b\n\x13\x64\x65\x66\x61ult_timeout_sec\x18\x03 \x01(\x05\x12\x12\n\nsettle_sec\x18& \x01(\x05\x12\x11\n\tbase_conf\x18\x04 \x01(\t\x12\x11\n\tsite_path\x18\x05 \x01(\t\x12\x1f\n\x17initial_dhcp_lease_time\x18\x06 \x01(\t\x12\x17\n\x0f\x64hcp_lease_time\x18\x07 \x01(\t\x12\x19\n\x11\x64hcp_response_sec\x18\' \x01(\x05\x12\x1e\n\x16long_dhcp_response_sec\x18\x08 \x01(\x05\x12\"\n\x0cswitch_setup\x18\t \x01(\x0b\x32\x0c.SwitchSetup\x12\x12\n\nhost_tests\x18\x10 \x01(\t\x12\x13\n\x0b\x62uild_tests\x18$ \x01(\x08\x12\x11\n\trun_limit\x18\x11 \x01(\x05\x12\x11\n\tfail_mode\x18\x12 \x01(\x08\x12\x13\n\x0bsingle_shot\x18\" \x01(\x08\x12\x15\n\rresult_linger\x18\x13 \x01(\x08\x12\x0f\n\x07no_test\x18\x14 \x01(\x08\x12\x11\n\tkeep_hold\x18( \x01(\x08\x12\x14\n\x0c\x64\x61q_loglevel\x18\x15 \x01(\t\x12\x18\n\x10mininet_loglevel\x18\x16 \x01(\t\x12\x13\n\x0b\x66inish_hook\x18# \x01(\t\x12\x10\n\x08gcp_cred\x18\x17 \x01(\t\x12\x11\n\tgcp_topic\x18\x18 \x01(\t\x12\x13\n\x0bschema_path\x18\x19 \x01(\t\x12\x11\n\tmud_files\x18\x1a \x01(\t\x12\x14\n\x0c\x64\x65vice_specs\x18\x1b \x01(\t\x12\x13\n\x0btest_config\x18\x1c \x01(\t\x12\x19\n\x11port_debounce_sec\x18\x1d \x01(\x05\x12\x11\n\tfail_hook\x18\x1e \x01(\t\x12\x17\n\x0f\x64\x65vice_template\x18\x1f \x01(\t\x12\x14\n\x0csite_reports\x18 \x01(\t\x12\x1f\n\x17run_data_retention_days\x18! 
\x01(\x02\x12.\n\ninterfaces\x18% \x03(\x0b\x32\x1a.DaqConfig.InterfacesEntry\x12/\n\x0b\x66\x61il_module\x18/ \x03(\x0b\x32\x1a.DaqConfig.FailModuleEntry\x12\x1d\n\x15port_flap_timeout_sec\x18\x30 \x01(\x05\x12\x1c\n\tusi_setup\x18\x31 \x01(\x0b\x32\t.USISetup\x1a=\n\x0fInterfacesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.Interface:\x02\x38\x01\x1a\x31\n\x0f\x46\x61ilModuleEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x17\n\x08USISetup\x12\x0b\n\x03url\x18\x01 \x01(\t\"\xf4\x01\n\x0bSwitchSetup\x12\x11\n\tctrl_intf\x18\t \x01(\t\x12\x0f\n\x07ip_addr\x18\x0b \x01(\t\x12\x13\n\x0buplink_port\x18\r \x01(\x05\x12\x0f\n\x07lo_port\x18\x0e \x01(\x05\x12\x10\n\x08\x61lt_port\x18\x10 \x01(\x05\x12\x0f\n\x07lo_addr\x18\x12 \x01(\t\x12\x11\n\tmods_addr\x18\x14 \x01(\t\x12\x0f\n\x07of_dpid\x18) \x01(\t\x12\x11\n\tdata_intf\x18* \x01(\t\x12\x0e\n\x06\x65xt_br\x18+ \x01(\t\x12\r\n\x05model\x18, \x01(\t\x12\x10\n\x08username\x18- \x01(\t\x12\x10\n\x08password\x18. \x01(\t\"\'\n\tInterface\x12\x0c\n\x04opts\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\x62\x06proto3'
)
@@ -57,8 +57,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=941,
- serialized_end=1002,
+ serialized_start=971,
+ serialized_end=1032,
)
_DAQCONFIG_FAILMODULEENTRY = _descriptor.Descriptor(
@@ -94,8 +94,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=1004,
- serialized_end=1053,
+ serialized_start=1034,
+ serialized_end=1083,
)
_DAQCONFIG = _descriptor.Descriptor(
@@ -357,6 +357,13 @@
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='usi_setup', full_name='DaqConfig.usi_setup', index=36,
+ number=49, type=11, cpp_type=10, label=1,
+ has_default_value=False, default_value=None,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
@@ -370,7 +377,38 @@
oneofs=[
],
serialized_start=34,
- serialized_end=1053,
+ serialized_end=1083,
+)
+
+
+_USISETUP = _descriptor.Descriptor(
+ name='USISetup',
+ full_name='USISetup',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='url', full_name='USISetup.url', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1085,
+ serialized_end=1108,
)
@@ -410,56 +448,63 @@
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
- name='lo_addr', full_name='SwitchSetup.lo_addr', index=4,
- number=15, type=9, cpp_type=9, label=1,
+ name='alt_port', full_name='SwitchSetup.alt_port', index=4,
+ number=16, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='lo_addr', full_name='SwitchSetup.lo_addr', index=5,
+ number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
- name='mods_addr', full_name='SwitchSetup.mods_addr', index=5,
- number=16, type=9, cpp_type=9, label=1,
+ name='mods_addr', full_name='SwitchSetup.mods_addr', index=6,
+ number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
- name='of_dpid', full_name='SwitchSetup.of_dpid', index=6,
+ name='of_dpid', full_name='SwitchSetup.of_dpid', index=7,
number=41, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
- name='data_intf', full_name='SwitchSetup.data_intf', index=7,
+ name='data_intf', full_name='SwitchSetup.data_intf', index=8,
number=42, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
- name='ext_br', full_name='SwitchSetup.ext_br', index=8,
+ name='ext_br', full_name='SwitchSetup.ext_br', index=9,
number=43, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
- name='model', full_name='SwitchSetup.model', index=9,
+ name='model', full_name='SwitchSetup.model', index=10,
number=44, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
- name='username', full_name='SwitchSetup.username', index=10,
+ name='username', full_name='SwitchSetup.username', index=11,
number=45, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
- name='password', full_name='SwitchSetup.password', index=11,
+ name='password', full_name='SwitchSetup.password', index=12,
number=46, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
@@ -477,8 +522,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=1056,
- serialized_end=1282,
+ serialized_start=1111,
+ serialized_end=1355,
)
@@ -515,8 +560,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=1284,
- serialized_end=1323,
+ serialized_start=1357,
+ serialized_end=1396,
)
_DAQCONFIG_INTERFACESENTRY.fields_by_name['value'].message_type = _INTERFACE
@@ -525,7 +570,9 @@
_DAQCONFIG.fields_by_name['switch_setup'].message_type = _SWITCHSETUP
_DAQCONFIG.fields_by_name['interfaces'].message_type = _DAQCONFIG_INTERFACESENTRY
_DAQCONFIG.fields_by_name['fail_module'].message_type = _DAQCONFIG_FAILMODULEENTRY
+_DAQCONFIG.fields_by_name['usi_setup'].message_type = _USISETUP
DESCRIPTOR.message_types_by_name['DaqConfig'] = _DAQCONFIG
+DESCRIPTOR.message_types_by_name['USISetup'] = _USISETUP
DESCRIPTOR.message_types_by_name['SwitchSetup'] = _SWITCHSETUP
DESCRIPTOR.message_types_by_name['Interface'] = _INTERFACE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
@@ -553,6 +600,13 @@
_sym_db.RegisterMessage(DaqConfig.InterfacesEntry)
_sym_db.RegisterMessage(DaqConfig.FailModuleEntry)
+USISetup = _reflection.GeneratedProtocolMessageType('USISetup', (_message.Message,), {
+ 'DESCRIPTOR' : _USISETUP,
+ '__module__' : 'daq.proto.system_config_pb2'
+ # @@protoc_insertion_point(class_scope:USISetup)
+ })
+_sym_db.RegisterMessage(USISetup)
+
SwitchSetup = _reflection.GeneratedProtocolMessageType('SwitchSetup', (_message.Message,), {
'DESCRIPTOR' : _SWITCHSETUP,
'__module__' : 'daq.proto.system_config_pb2'
diff --git a/libs/proto/usi_pb2.py b/libs/proto/usi_pb2.py
new file mode 100644
index 0000000000..9414eb0416
--- /dev/null
+++ b/libs/proto/usi_pb2.py
@@ -0,0 +1,455 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: usi.proto
+
+from google.protobuf.internal import enum_type_wrapper
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='usi.proto',
+ package='usi',
+ syntax='proto3',
+ serialized_options=b'\n\004grpcB\010USIProtoP\001',
+ create_key=_descriptor._internal_create_key,
+ serialized_pb=b'\n\tusi.proto\x12\x03usi\"\'\n\x14SwitchActionResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"\x9b\x01\n\rPowerResponse\x12!\n\x19\x63urrent_power_consumption\x18\x01 \x01(\x02\x12\x1d\n\x15max_power_consumption\x18\x02 \x01(\x02\x12$\n\x0bpoe_support\x18\x03 \x01(\x0e\x32\x0f.usi.POESupport\x12\"\n\npoe_status\x18\x04 \x01(\x0e\x32\x0e.usi.POEStatus\"]\n\x11InterfaceResponse\x12$\n\x0blink_status\x18\x01 \x01(\x0e\x32\x0f.usi.LinkStatus\x12\x12\n\nlink_speed\x18\x02 \x01(\x05\x12\x0e\n\x06\x64uplex\x18\x03 \x01(\t\"w\n\nSwitchInfo\x12\x0f\n\x07ip_addr\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65vice_port\x18\x03 \x01(\x05\x12\x1f\n\x05model\x18\x04 \x01(\x0e\x32\x10.usi.SwitchModel\x12\x10\n\x08username\x18\x05 \x01(\t\x12\x10\n\x08password\x18\x06 \x01(\t*W\n\x0bSwitchModel\x12\x17\n\x13\x41LLIED_TELESIS_X230\x10\x00\x12\x0e\n\nCISCO_9300\x10\x01\x12\x0e\n\nOVS_SWITCH\x10\x02\x12\x0f\n\x0b\x46\x41UX_SWITCH\x10\x03*\x1e\n\nLinkStatus\x12\x06\n\x02UP\x10\x00\x12\x08\n\x04\x44OWN\x10\x01*\'\n\nPOESupport\x12\x0b\n\x07\x45NABLED\x10\x00\x12\x0c\n\x08\x44ISABLED\x10\x01*1\n\tPOEStatus\x12\x06\n\x02ON\x10\x00\x12\x07\n\x03OFF\x10\x01\x12\t\n\x05\x46\x41ULT\x10\x02\x12\x08\n\x04\x44\x45NY\x10\x03\x32\xef\x01\n\nUSIService\x12\x31\n\x08GetPower\x12\x0f.usi.SwitchInfo\x1a\x12.usi.PowerResponse\"\x00\x12\x39\n\x0cGetInterface\x12\x0f.usi.SwitchInfo\x1a\x16.usi.InterfaceResponse\"\x00\x12:\n\ndisconnect\x12\x0f.usi.SwitchInfo\x1a\x19.usi.SwitchActionResponse\"\x00\x12\x37\n\x07\x63onnect\x12\x0f.usi.SwitchInfo\x1a\x19.usi.SwitchActionResponse\"\x00\x42\x12\n\x04grpcB\x08USIProtoP\x01\x62\x06proto3'
+)
+
+_SWITCHMODEL = _descriptor.EnumDescriptor(
+ name='SwitchModel',
+ full_name='usi.SwitchModel',
+ filename=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='ALLIED_TELESIS_X230', index=0, number=0,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ _descriptor.EnumValueDescriptor(
+ name='CISCO_9300', index=1, number=1,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ _descriptor.EnumValueDescriptor(
+ name='OVS_SWITCH', index=2, number=2,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ _descriptor.EnumValueDescriptor(
+ name='FAUX_SWITCH', index=3, number=3,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ ],
+ containing_type=None,
+ serialized_options=None,
+ serialized_start=433,
+ serialized_end=520,
+)
+_sym_db.RegisterEnumDescriptor(_SWITCHMODEL)
+
+SwitchModel = enum_type_wrapper.EnumTypeWrapper(_SWITCHMODEL)
+_LINKSTATUS = _descriptor.EnumDescriptor(
+ name='LinkStatus',
+ full_name='usi.LinkStatus',
+ filename=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='UP', index=0, number=0,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ _descriptor.EnumValueDescriptor(
+ name='DOWN', index=1, number=1,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ ],
+ containing_type=None,
+ serialized_options=None,
+ serialized_start=522,
+ serialized_end=552,
+)
+_sym_db.RegisterEnumDescriptor(_LINKSTATUS)
+
+LinkStatus = enum_type_wrapper.EnumTypeWrapper(_LINKSTATUS)
+_POESUPPORT = _descriptor.EnumDescriptor(
+ name='POESupport',
+ full_name='usi.POESupport',
+ filename=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='ENABLED', index=0, number=0,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ _descriptor.EnumValueDescriptor(
+ name='DISABLED', index=1, number=1,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ ],
+ containing_type=None,
+ serialized_options=None,
+ serialized_start=554,
+ serialized_end=593,
+)
+_sym_db.RegisterEnumDescriptor(_POESUPPORT)
+
+POESupport = enum_type_wrapper.EnumTypeWrapper(_POESUPPORT)
+_POESTATUS = _descriptor.EnumDescriptor(
+ name='POEStatus',
+ full_name='usi.POEStatus',
+ filename=None,
+ file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='ON', index=0, number=0,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ _descriptor.EnumValueDescriptor(
+ name='OFF', index=1, number=1,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ _descriptor.EnumValueDescriptor(
+ name='FAULT', index=2, number=2,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ _descriptor.EnumValueDescriptor(
+ name='DENY', index=3, number=3,
+ serialized_options=None,
+ type=None,
+ create_key=_descriptor._internal_create_key),
+ ],
+ containing_type=None,
+ serialized_options=None,
+ serialized_start=595,
+ serialized_end=644,
+)
+_sym_db.RegisterEnumDescriptor(_POESTATUS)
+
+POEStatus = enum_type_wrapper.EnumTypeWrapper(_POESTATUS)
+ALLIED_TELESIS_X230 = 0
+CISCO_9300 = 1
+OVS_SWITCH = 2
+FAUX_SWITCH = 3
+UP = 0
+DOWN = 1
+ENABLED = 0
+DISABLED = 1
+ON = 0
+OFF = 1
+FAULT = 2
+DENY = 3
+
+
+
+_SWITCHACTIONRESPONSE = _descriptor.Descriptor(
+ name='SwitchActionResponse',
+ full_name='usi.SwitchActionResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='success', full_name='usi.SwitchActionResponse.success', index=0,
+ number=1, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=18,
+ serialized_end=57,
+)
+
+
+_POWERRESPONSE = _descriptor.Descriptor(
+ name='PowerResponse',
+ full_name='usi.PowerResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='current_power_consumption', full_name='usi.PowerResponse.current_power_consumption', index=0,
+ number=1, type=2, cpp_type=6, label=1,
+ has_default_value=False, default_value=float(0),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='max_power_consumption', full_name='usi.PowerResponse.max_power_consumption', index=1,
+ number=2, type=2, cpp_type=6, label=1,
+ has_default_value=False, default_value=float(0),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='poe_support', full_name='usi.PowerResponse.poe_support', index=2,
+ number=3, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='poe_status', full_name='usi.PowerResponse.poe_status', index=3,
+ number=4, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=60,
+ serialized_end=215,
+)
+
+
+_INTERFACERESPONSE = _descriptor.Descriptor(
+ name='InterfaceResponse',
+ full_name='usi.InterfaceResponse',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='link_status', full_name='usi.InterfaceResponse.link_status', index=0,
+ number=1, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='link_speed', full_name='usi.InterfaceResponse.link_speed', index=1,
+ number=2, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='duplex', full_name='usi.InterfaceResponse.duplex', index=2,
+ number=3, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=217,
+ serialized_end=310,
+)
+
+
+_SWITCHINFO = _descriptor.Descriptor(
+ name='SwitchInfo',
+ full_name='usi.SwitchInfo',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='ip_addr', full_name='usi.SwitchInfo.ip_addr', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='device_port', full_name='usi.SwitchInfo.device_port', index=1,
+ number=3, type=5, cpp_type=1, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='model', full_name='usi.SwitchInfo.model', index=2,
+ number=4, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='username', full_name='usi.SwitchInfo.username', index=3,
+ number=5, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='password', full_name='usi.SwitchInfo.password', index=4,
+ number=6, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=312,
+ serialized_end=431,
+)
+
+_POWERRESPONSE.fields_by_name['poe_support'].enum_type = _POESUPPORT
+_POWERRESPONSE.fields_by_name['poe_status'].enum_type = _POESTATUS
+_INTERFACERESPONSE.fields_by_name['link_status'].enum_type = _LINKSTATUS
+_SWITCHINFO.fields_by_name['model'].enum_type = _SWITCHMODEL
+DESCRIPTOR.message_types_by_name['SwitchActionResponse'] = _SWITCHACTIONRESPONSE
+DESCRIPTOR.message_types_by_name['PowerResponse'] = _POWERRESPONSE
+DESCRIPTOR.message_types_by_name['InterfaceResponse'] = _INTERFACERESPONSE
+DESCRIPTOR.message_types_by_name['SwitchInfo'] = _SWITCHINFO
+DESCRIPTOR.enum_types_by_name['SwitchModel'] = _SWITCHMODEL
+DESCRIPTOR.enum_types_by_name['LinkStatus'] = _LINKSTATUS
+DESCRIPTOR.enum_types_by_name['POESupport'] = _POESUPPORT
+DESCRIPTOR.enum_types_by_name['POEStatus'] = _POESTATUS
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+SwitchActionResponse = _reflection.GeneratedProtocolMessageType('SwitchActionResponse', (_message.Message,), {
+ 'DESCRIPTOR' : _SWITCHACTIONRESPONSE,
+ '__module__' : 'usi_pb2'
+ # @@protoc_insertion_point(class_scope:usi.SwitchActionResponse)
+ })
+_sym_db.RegisterMessage(SwitchActionResponse)
+
+PowerResponse = _reflection.GeneratedProtocolMessageType('PowerResponse', (_message.Message,), {
+ 'DESCRIPTOR' : _POWERRESPONSE,
+ '__module__' : 'usi_pb2'
+ # @@protoc_insertion_point(class_scope:usi.PowerResponse)
+ })
+_sym_db.RegisterMessage(PowerResponse)
+
+InterfaceResponse = _reflection.GeneratedProtocolMessageType('InterfaceResponse', (_message.Message,), {
+ 'DESCRIPTOR' : _INTERFACERESPONSE,
+ '__module__' : 'usi_pb2'
+ # @@protoc_insertion_point(class_scope:usi.InterfaceResponse)
+ })
+_sym_db.RegisterMessage(InterfaceResponse)
+
+SwitchInfo = _reflection.GeneratedProtocolMessageType('SwitchInfo', (_message.Message,), {
+ 'DESCRIPTOR' : _SWITCHINFO,
+ '__module__' : 'usi_pb2'
+ # @@protoc_insertion_point(class_scope:usi.SwitchInfo)
+ })
+_sym_db.RegisterMessage(SwitchInfo)
+
+
+DESCRIPTOR._options = None
+
+_USISERVICE = _descriptor.ServiceDescriptor(
+ name='USIService',
+ full_name='usi.USIService',
+ file=DESCRIPTOR,
+ index=0,
+ serialized_options=None,
+ create_key=_descriptor._internal_create_key,
+ serialized_start=647,
+ serialized_end=886,
+ methods=[
+ _descriptor.MethodDescriptor(
+ name='GetPower',
+ full_name='usi.USIService.GetPower',
+ index=0,
+ containing_service=None,
+ input_type=_SWITCHINFO,
+ output_type=_POWERRESPONSE,
+ serialized_options=None,
+ create_key=_descriptor._internal_create_key,
+ ),
+ _descriptor.MethodDescriptor(
+ name='GetInterface',
+ full_name='usi.USIService.GetInterface',
+ index=1,
+ containing_service=None,
+ input_type=_SWITCHINFO,
+ output_type=_INTERFACERESPONSE,
+ serialized_options=None,
+ create_key=_descriptor._internal_create_key,
+ ),
+ _descriptor.MethodDescriptor(
+ name='disconnect',
+ full_name='usi.USIService.disconnect',
+ index=2,
+ containing_service=None,
+ input_type=_SWITCHINFO,
+ output_type=_SWITCHACTIONRESPONSE,
+ serialized_options=None,
+ create_key=_descriptor._internal_create_key,
+ ),
+ _descriptor.MethodDescriptor(
+ name='connect',
+ full_name='usi.USIService.connect',
+ index=3,
+ containing_service=None,
+ input_type=_SWITCHINFO,
+ output_type=_SWITCHACTIONRESPONSE,
+ serialized_options=None,
+ create_key=_descriptor._internal_create_key,
+ ),
+])
+_sym_db.RegisterServiceDescriptor(_USISERVICE)
+
+DESCRIPTOR.services_by_name['USIService'] = _USISERVICE
+
+# @@protoc_insertion_point(module_scope)
diff --git a/libs/proto/usi_pb2_grpc.py b/libs/proto/usi_pb2_grpc.py
new file mode 100644
index 0000000000..c8e57501c9
--- /dev/null
+++ b/libs/proto/usi_pb2_grpc.py
@@ -0,0 +1,161 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+
+import usi_pb2 as usi__pb2
+
+
+class USIServiceStub(object):
+ """Missing associated documentation comment in .proto file."""
+
+ def __init__(self, channel):
+ """Constructor.
+
+ Args:
+ channel: A grpc.Channel.
+ """
+ self.GetPower = channel.unary_unary(
+ '/usi.USIService/GetPower',
+ request_serializer=usi__pb2.SwitchInfo.SerializeToString,
+ response_deserializer=usi__pb2.PowerResponse.FromString,
+ )
+ self.GetInterface = channel.unary_unary(
+ '/usi.USIService/GetInterface',
+ request_serializer=usi__pb2.SwitchInfo.SerializeToString,
+ response_deserializer=usi__pb2.InterfaceResponse.FromString,
+ )
+ self.disconnect = channel.unary_unary(
+ '/usi.USIService/disconnect',
+ request_serializer=usi__pb2.SwitchInfo.SerializeToString,
+ response_deserializer=usi__pb2.SwitchActionResponse.FromString,
+ )
+ self.connect = channel.unary_unary(
+ '/usi.USIService/connect',
+ request_serializer=usi__pb2.SwitchInfo.SerializeToString,
+ response_deserializer=usi__pb2.SwitchActionResponse.FromString,
+ )
+
+
+class USIServiceServicer(object):
+ """Missing associated documentation comment in .proto file."""
+
+ def GetPower(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def GetInterface(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def disconnect(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+ def connect(self, request, context):
+ """Missing associated documentation comment in .proto file."""
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+ context.set_details('Method not implemented!')
+ raise NotImplementedError('Method not implemented!')
+
+
+def add_USIServiceServicer_to_server(servicer, server):
+ rpc_method_handlers = {
+ 'GetPower': grpc.unary_unary_rpc_method_handler(
+ servicer.GetPower,
+ request_deserializer=usi__pb2.SwitchInfo.FromString,
+ response_serializer=usi__pb2.PowerResponse.SerializeToString,
+ ),
+ 'GetInterface': grpc.unary_unary_rpc_method_handler(
+ servicer.GetInterface,
+ request_deserializer=usi__pb2.SwitchInfo.FromString,
+ response_serializer=usi__pb2.InterfaceResponse.SerializeToString,
+ ),
+ 'disconnect': grpc.unary_unary_rpc_method_handler(
+ servicer.disconnect,
+ request_deserializer=usi__pb2.SwitchInfo.FromString,
+ response_serializer=usi__pb2.SwitchActionResponse.SerializeToString,
+ ),
+ 'connect': grpc.unary_unary_rpc_method_handler(
+ servicer.connect,
+ request_deserializer=usi__pb2.SwitchInfo.FromString,
+ response_serializer=usi__pb2.SwitchActionResponse.SerializeToString,
+ ),
+ }
+ generic_handler = grpc.method_handlers_generic_handler(
+ 'usi.USIService', rpc_method_handlers)
+ server.add_generic_rpc_handlers((generic_handler,))
+
+
+ # This class is part of an EXPERIMENTAL API.
+class USIService(object):
+ """Missing associated documentation comment in .proto file."""
+
+ @staticmethod
+ def GetPower(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/usi.USIService/GetPower',
+ usi__pb2.SwitchInfo.SerializeToString,
+ usi__pb2.PowerResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def GetInterface(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/usi.USIService/GetInterface',
+ usi__pb2.SwitchInfo.SerializeToString,
+ usi__pb2.InterfaceResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def disconnect(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/usi.USIService/disconnect',
+ usi__pb2.SwitchInfo.SerializeToString,
+ usi__pb2.SwitchActionResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
+
+ @staticmethod
+ def connect(request,
+ target,
+ options=(),
+ channel_credentials=None,
+ call_credentials=None,
+ compression=None,
+ wait_for_ready=None,
+ timeout=None,
+ metadata=None):
+ return grpc.experimental.unary_unary(request, target, '/usi.USIService/connect',
+ usi__pb2.SwitchInfo.SerializeToString,
+ usi__pb2.SwitchActionResponse.FromString,
+ options, channel_credentials,
+ call_credentials, compression, wait_for_ready, timeout, metadata)
diff --git a/proto/system_config.proto b/proto/system_config.proto
index a0cbfbda48..1d1d3c6475 100644
--- a/proto/system_config.proto
+++ b/proto/system_config.proto
@@ -114,8 +114,17 @@ message DaqConfig {
// Set time between port disconnect and host tests shutdown
int32 port_flap_timeout_sec = 48;
+
+ // USI url
+ USISetup usi_setup = 49;
}
+/**
+ * USI parameters
+**/
+message USISetup {
+ string url = 1;
+}
/*
* System configuraiton of the access switch. This is used by the system
@@ -131,14 +140,17 @@ message SwitchSetup {
// Dataplane uplink port
int32 uplink_port = 13;
- // Local port of open flow controller
+ // Local port of DAQ OpenFlow controller
int32 lo_port = 14;
+ // Local port for an alternate OpenFlow controller
+ int32 alt_port = 16;
+
// IP address and subnet for local control plane interface
- string lo_addr = 15;
+ string lo_addr = 18;
// IP address template and subnet for module ip addresses
- string mods_addr = 16;
+ string mods_addr = 20;
// Dataplane id of external OpenFlow switch
string of_dpid = 41;
diff --git a/pubber/bin/keygen b/pubber/bin/keygen
index 1b3cf60d67..5ee3166d16 100755
--- a/pubber/bin/keygen
+++ b/pubber/bin/keygen
@@ -1,26 +1,21 @@
#!/bin/bash -e
-ROOT=$(realpath $(dirname $0)/../..)
-cd $ROOT
-
-TARGET_PREFIX=local/rsa_
-
-PUBLIC_CERT=${TARGET_PREFIX}cert.pem
-PRIVATE_CERT=${TARGET_PREFIX}private.pem
-PRIVATE_KEY=${TARGET_PREFIX}private.pkcs8
-
-if [ -f $PUBLIC_CERT ]; then
- echo $PUBLIC_CERT already exists, exiting.
+if [ "$#" != 2 ]; then
+ echo $0 [type] [out_dir]
false
fi
-if [ -f $PRIVATE_CERT ]; then
- echo $PRIVATE_CERT already exists, exiting.
- false
-fi
-if [ -f $PRIVATE_KEY ]; then
- echo $PRIVATE_KEY already exists, exiting.
+
+type=$1
+cd $2
+
+if [ $type == RS256 ]; then
+ openssl genrsa -out rsa_private.pem 2048
+ openssl rsa -in rsa_private.pem -pubout -out rsa_public.pem
+elif [ $type == RS256_X509 ]; then
+ openssl req -x509 -nodes -newkey rsa:2048 -keyout rsa_private.pem -days 1000000 -out rsa_cert.pem -subj "/CN=unused"
+else
+ echo Unknown key type $type. Try one of { RS256, RS256_X509 }
false
fi
-openssl req -x509 -nodes -newkey rsa:2048 -keyout $PRIVATE_CERT -days 1000000 -out $PUBLIC_CERT -subj "/CN=unused"
-openssl pkcs8 -topk8 -inform PEM -outform DER -in $PRIVATE_CERT -nocrypt > $PRIVATE_KEY
+openssl pkcs8 -topk8 -inform PEM -outform DER -in rsa_private.pem -nocrypt > rsa_private.pkcs8
diff --git a/pubber/src/main/java/daq/pubber/Configuration.java b/pubber/src/main/java/daq/pubber/Configuration.java
index 7c362781ef..e72d6919d2 100644
--- a/pubber/src/main/java/daq/pubber/Configuration.java
+++ b/pubber/src/main/java/daq/pubber/Configuration.java
@@ -10,6 +10,7 @@ public class Configuration {
public String registryId;
public String gatewayId;
public String deviceId;
+ public String sitePath;
public String keyFile = "local/rsa_private.pkcs8";
public byte[] keyBytes;
public String algorithm = "RS256";
diff --git a/pubber/src/main/java/daq/pubber/MqttPublisher.java b/pubber/src/main/java/daq/pubber/MqttPublisher.java
index 9358ee3546..ea32788d30 100644
--- a/pubber/src/main/java/daq/pubber/MqttPublisher.java
+++ b/pubber/src/main/java/daq/pubber/MqttPublisher.java
@@ -155,7 +155,9 @@ private MqttClient newMqttClient(String deviceId) {
try {
Preconditions.checkNotNull(registryId, "registryId is null");
Preconditions.checkNotNull(deviceId, "deviceId is null");
- MqttClient mqttClient = new MqttClient(getBrokerUrl(), getClientId(deviceId),
+ String clientId = getClientId(deviceId);
+ LOG.info("Creating new mqtt client for " + clientId);
+ MqttClient mqttClient = new MqttClient(getBrokerUrl(), clientId,
new MemoryPersistence());
return mqttClient;
} catch (Exception e) {
diff --git a/pubber/src/main/java/daq/pubber/Pubber.java b/pubber/src/main/java/daq/pubber/Pubber.java
index 5f28a08efa..1265b65e53 100644
--- a/pubber/src/main/java/daq/pubber/Pubber.java
+++ b/pubber/src/main/java/daq/pubber/Pubber.java
@@ -8,9 +8,6 @@
import daq.udmi.Message.Pointset;
import daq.udmi.Message.PointsetState;
import daq.udmi.Message.State;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Path;
@@ -24,6 +21,8 @@
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class Pubber {
@@ -40,9 +39,10 @@ public class Pubber {
private static final int MIN_REPORT_MS = 200;
private static final int DEFAULT_REPORT_MS = 5000;
private static final int CONFIG_WAIT_TIME_MS = 10000;
- private static final int STATE_THROTTLE_MS = 1500;
+ private static final int STATE_THROTTLE_MS = 2000;
private static final String CONFIG_ERROR_STATUS_KEY = "config_error";
private static final int LOGGING_MOD_COUNT = 10;
+ public static final String KEY_SITE_PATH_FORMAT = "%s/devices/%s/rsa_private.pkcs8";
private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
@@ -163,6 +163,11 @@ private void addPoint(AbstractPoint point) {
}
private void initialize() {
+ Preconditions.checkNotNull(configuration.deviceId, "configuration deviceId not defined");
+ if (configuration.sitePath != null) {
+ configuration.keyFile = String.format(KEY_SITE_PATH_FORMAT, configuration.sitePath,
+ configuration.deviceId);
+ }
Preconditions.checkState(mqttPublisher == null, "mqttPublisher already defined");
Preconditions.checkNotNull(configuration.keyFile, "configuration keyFile not defined");
System.err.println("Loading device key file from " + configuration.keyFile);
@@ -262,8 +267,8 @@ private void publishLogMessage(String deviceId, String logMessage) {
private void publishStateMessage(String deviceId) {
lastStateTimeMs = sleepUntil(lastStateTimeMs + STATE_THROTTLE_MS);
- info("Sending state message for device " + deviceId);
deviceState.timestamp = new Date();
+ info("Sending state message for device " + deviceId + " at " + deviceState.timestamp);
mqttPublisher.publish(deviceId, STATE_TOPIC, deviceState);
}
diff --git a/resources/device_types/deltacontrols_o3-din-cpu/module_config.json b/resources/device_types/deltacontrols_o3-din-cpu/module_config.json
index 066bf35abb..a17a5a6997 100644
--- a/resources/device_types/deltacontrols_o3-din-cpu/module_config.json
+++ b/resources/device_types/deltacontrols_o3-din-cpu/module_config.json
@@ -24,6 +24,9 @@
"nmap": {
"enabled": true
},
+ "ntp": {
+ "enabled": true
+ },
"switch": {
"enabled": true,
"poe": {
diff --git a/resources/device_types/distech_ecy-s1000/module_config.json b/resources/device_types/distech_ecy-s1000/module_config.json
index d790899c5c..b9a1847754 100644
--- a/resources/device_types/distech_ecy-s1000/module_config.json
+++ b/resources/device_types/distech_ecy-s1000/module_config.json
@@ -24,6 +24,9 @@
"nmap": {
"enabled": true
},
+ "ntp": {
+ "enabled": true
+ },
"switch": {
"enabled": true,
"poe": {
diff --git a/resources/setups/baseline/module_config.json b/resources/setups/baseline/module_config.json
index 8ebd56cc57..144c249ce2 100644
--- a/resources/setups/baseline/module_config.json
+++ b/resources/setups/baseline/module_config.json
@@ -1,7 +1,10 @@
{
"modules": {
"ipaddr": {
- "timeout_sec": 300
+ "enabled": false,
+ "timeout_sec": 900,
+ "port_flap_timeout_sec": 20,
+ "dhcp_ranges": [["192.168.0.1", "192.168.255.254", 16]]
},
"pass": {
"enabled": true
@@ -32,6 +35,9 @@
},
"network": {
"enabled": true
+ },
+ "ntp": {
+ "enabled": true
}
}
}
diff --git a/resources/setups/qualification/device_module_config.json b/resources/setups/qualification/device_module_config.json
index 6019735d2b..9713c5723b 100644
--- a/resources/setups/qualification/device_module_config.json
+++ b/resources/setups/qualification/device_module_config.json
@@ -39,6 +39,9 @@
"nmap": {
"enabled": true
},
+ "ntp": {
+ "enabled": true
+ },
"switch": {
"enabled": true,
"poe": {
diff --git a/resources/setups/qualification/device_type_module_config.json b/resources/setups/qualification/device_type_module_config.json
index f183d5527c..9d3181460b 100644
--- a/resources/setups/qualification/device_type_module_config.json
+++ b/resources/setups/qualification/device_type_module_config.json
@@ -33,6 +33,9 @@
"nmap": {
"enabled": true
},
+ "ntp": {
+ "enabled": true
+ },
"switch": {
"enabled": true,
"poe": {
diff --git a/resources/setups/qualification/system_module_config.json b/resources/setups/qualification/system_module_config.json
index de3cc56461..8b1926a3f1 100644
--- a/resources/setups/qualification/system_module_config.json
+++ b/resources/setups/qualification/system_module_config.json
@@ -24,6 +24,9 @@
"nmap": {
"enabled": true
},
+ "ntp": {
+ "enabled": true
+ },
"switch": {
"enabled": true,
"poe": {
diff --git a/resources/setups/remediation/device_module_config.json b/resources/setups/remediation/device_module_config.json
index 976761c762..65223dac27 100644
--- a/resources/setups/remediation/device_module_config.json
+++ b/resources/setups/remediation/device_module_config.json
@@ -35,6 +35,9 @@
"nmap": {
"enabled": true
},
+ "ntp": {
+ "enabled": true
+ },
"switch": {
"enabled": true,
"poe": {
diff --git a/resources/setups/remediation/system_module_config.json b/resources/setups/remediation/system_module_config.json
index 17e7793758..754145aa9e 100644
--- a/resources/setups/remediation/system_module_config.json
+++ b/resources/setups/remediation/system_module_config.json
@@ -24,6 +24,9 @@
"nmap": {
"enabled": true
},
+ "ntp": {
+ "enabled": true
+ },
"switch": {
"enabled": true,
"poe": {
diff --git a/resources/test_site/module_config.json b/resources/test_site/module_config.json
index 4d672b67aa..ac17f224b1 100644
--- a/resources/test_site/module_config.json
+++ b/resources/test_site/module_config.json
@@ -17,6 +17,12 @@
},
"manual": {
"enabled": true
+ },
+ "ntp": {
+ "enabled": true
+ },
+ "ssh": {
+ "enabled": false
}
},
"process": {
diff --git a/subset/cloud/test_udmi b/subset/cloud/test_udmi
index 4b0cfb2a32..aa484f5434 100755
--- a/subset/cloud/test_udmi
+++ b/subset/cloud/test_udmi
@@ -1,4 +1,5 @@
#!/bin/bash -e
+
source reporting.sh
REPORT=/tmp/report.txt
@@ -59,7 +60,7 @@ echo Configured schema is $schema_path
echo Target device is $device_id
echo
-timeout 60 validator/bin/validate $PWD/$schema_path pubsub:$gcp_topic $service_id-$HOSTNAME || true
+timeout 60 validator/bin/validate $PWD/$schema_path pubsub:$gcp_topic $service_id-$HOSTNAME -- || true
function message_report {
message_type=$1
@@ -87,3 +88,7 @@ function message_report {
for message_type in $message_types; do
message_report $message_type
done
+
+fgrep RESULT $REPORT
+
+echo Done with test_udmi
diff --git a/subset/network/network_tests.py b/subset/network/network_tests.py
index 6dbaca3f85..d785debc67 100644
--- a/subset/network/network_tests.py
+++ b/subset/network/network_tests.py
@@ -1,5 +1,8 @@
import subprocess, time, sys, json
+import re
+import datetime
+
arguments = sys.argv
test_request = str(arguments[1])
@@ -24,13 +27,18 @@
description_communication_type = 'Device sends unicast or broadcast packets.'
description_ntp_support = 'Device sends NTP request packets.'
-tcpdump_display_all_packets = 'tcpdump -n src host ' + device_address + ' -r ' + cap_pcap_file
+tcpdump_display_all_packets = 'tcpdump -tttt -n src host ' + device_address + ' -r ' + cap_pcap_file
tcpdump_display_udp_bacnet_packets = 'tcpdump -n udp dst portrange 47808-47809 -r ' + cap_pcap_file
-tcpdump_display_arp_packets = 'tcpdump arp -r ' + cap_pcap_file
+tcpdump_display_arp_packets = 'tcpdump arp -n src host ' + device_address + ' -r ' + cap_pcap_file
tcpdump_display_ntp_packets = 'tcpdump dst port 123 -r ' + cap_pcap_file
tcpdump_display_eapol_packets = 'tcpdump port 1812 or port 1813 or port 3799 -r ' + cap_pcap_file
tcpdump_display_broadcast_packets = 'tcpdump broadcast and src host ' + device_address + ' -r ' + cap_pcap_file
+system_conf_file = "/config/inst/system.conf"
+tcpdump_date_format = "%Y-%m-%d %H:%M:%S.%f"
+min_send_seconds = 300
+min_send_duration = "5 minutes"
+
def write_report(string_to_append):
print(string_to_append.strip())
with open(report_filename, 'a+') as file_open:
@@ -49,7 +57,7 @@ def add_packet_count_to_report(packet_type, packet_count):
write_report("{i} {t} Packets recieved={p}\n".format(i=ignore, t=packet_type, p=packet_count))
def add_packet_info_to_report(packets_received):
- packet_list = packets_received.rstrip().split("\n")
+ packet_list = packets_received.strip().split("\n")
outnum = min(len(packet_list), max_packets_in_report)
for x in range(0, outnum):
write_report("{i} {p}\n".format(i=ignore, p=packet_list[x]))
@@ -99,19 +107,95 @@ def decode_json_config(config_file, map_name, action):
elif action == 'remove':
remove_from_port_list(port_map)
+
+def get_scan_length(config_file):
+ """ Gets length of the monitor.pcap scan
+
+ Reads the system.conf file and returns the length of the monitor_scan
+
+ Args:
+ config_file: Location of system.conf file within test container
+
+ Returns:
+ Length of monitor scan in seconds
+
+ If not defined, or system.conf could not be found
+ returns false
+ """
+
+ scan_length = False
+ try:
+ with open(config_file) as file:
+ for line in file:
+ match = re.search(r'^monitor_scan_sec=(\d+)', line)
+ if match:
+ matched_length = int(match.group(1))
+ # If scan length = 0 or not found, then monitor scan does not exist
+ scan_length = matched_length if matched_length > 0 else False
+ return scan_length
+ except Exception as e:
+ write_report("Error encountered reading system.conf {}".format(e))
+ return False
+
def test_connection_min_send():
+ """ Runs the connection.min_send test
+
+ Tests if the device sends data packets of any type (inc data, NTP, etc)
+ within a period of 5 minutes by looking through the monitor.pcap file
+
+ The length of test can be configured using the min_send_seconds variable
+ at the start of the file
+ """
+
+ # Get scan length
+ scan_length = get_scan_length(system_conf_file)
+ min_send_delta = datetime.timedelta(seconds=min_send_seconds)
+ min_send_pass = False
+
+ # The test scans the monitor.pcap, so if it's not found skip
+ if not scan_length:
+ add_summary("DAQ monitor scan not running, test skipped")
+ return 'skip'
+
arp_shell_result = shell_command_with_result(tcpdump_display_arp_packets, 0, False)
arp_packets_received = packets_received_count(arp_shell_result)
if arp_packets_received > 0:
add_summary("ARP packets received.")
+
shell_result = shell_command_with_result(tcpdump_display_all_packets, 0, False)
- all_packets_received = packets_received_count(shell_result)
- app_packets_received = all_packets_received - arp_packets_received
- if app_packets_received > 0:
- add_summary("Other packets received.")
- print('min_send_packets', arp_packets_received, all_packets_received)
+ all_packets = shell_result.splitlines()
+
+ # Loop through tcpdump result and measure the time between successive packets
+ for i, packet in enumerate(all_packets):
+ # datetime is the first 26 characters of the line
+ packet_time = datetime.datetime.strptime(packet[:26], tcpdump_date_format)
+
+ if i == 0:
+ previous_packet_time = packet_time
+ continue
+
+ delta = packet_time - previous_packet_time
+ if delta < min_send_delta:
+ min_send_pass = True
+ break
+
+ previous_packet_time = packet_time
+
add_packet_info_to_report(shell_result)
- return 'pass' if app_packets_received > 0 else 'fail'
+
+ if not min_send_pass:
+ if scan_length > min_send_seconds:
+ add_summary('Data packets were not sent at a frequency less than ' +
+ min_send_duration)
+ return 'fail'
+ else:
+ add_summary('Please set DAQ monitor scan to be greater than ' +
+ min_send_duration)
+ return 'skip'
+
+ add_summary('Data packets were sent at a frequency of less than ' +
+ min_send_duration)
+ return 'pass'
def test_connection_dhcp_long():
shell_result = shell_command_with_result(tcpdump_display_arp_packets, 0, False)
diff --git a/subset/ntp/Dockerfile.test_ntp b/subset/ntp/Dockerfile.test_ntp
new file mode 100644
index 0000000000..2bfedf5a18
--- /dev/null
+++ b/subset/ntp/Dockerfile.test_ntp
@@ -0,0 +1,10 @@
+FROM daqf/aardvark:latest
+
+RUN $AG update && $AG install python python-setuptools python-pip netcat
+
+RUN pip install scapy
+
+COPY subset/ntp/ntp_tests.py .
+COPY subset/ntp/test_ntp .
+
+CMD ["./test_ntp"]
diff --git a/subset/ntp/README.md b/subset/ntp/README.md
new file mode 100644
index 0000000000..b6b4bc3a8e
--- /dev/null
+++ b/subset/ntp/README.md
@@ -0,0 +1,17 @@
+# NTP testing
+
+## test_ntp
+The NTP test inspects client NTP support and version.
+
+### Note for test developers
+The functional test code is included in the `ntp_tests.py` file.
+
+The test reads packets from startup.pcap.
+
+If the python code needs debugging, the pip module `scapy` is required (`pip install scapy`).
+
+### NTP Test conditions
+| Test ID | Info | Pass | Fail | Skip |
+|---|---|---|---|---|
+| connection.network.ntp_support | Are the received NTP packets using NTP v4? | NTP version is 4 | NTP version is not 4 | No NTP packets are received |
+
diff --git a/subset/ntp/build.conf b/subset/ntp/build.conf
new file mode 100644
index 0000000000..febb370b8f
--- /dev/null
+++ b/subset/ntp/build.conf
@@ -0,0 +1,2 @@
+build subset/ntp
+add ntp
diff --git a/subset/ntp/ntp_tests.py b/subset/ntp/ntp_tests.py
new file mode 100644
index 0000000000..2a774f1124
--- /dev/null
+++ b/subset/ntp/ntp_tests.py
@@ -0,0 +1,74 @@
+from __future__ import absolute_import
+import sys
+from scapy.all import NTP, rdpcap
+
+arguments = sys.argv
+
+test_request = str(arguments[1])
+cap_pcap_file = str(arguments[2])
+
+report_filename = 'report.txt'
+ignore = '%%'
+summary_text = ''
+result = 'fail'
+dash_break_line = '--------------------\n'
+description_ntp_support = 'Device supports NTP version 4.'
+
+
+def write_report(string_to_append):
+ with open(report_filename, 'a+') as file_open:
+ file_open.write(string_to_append)
+
+
+# Extracts the NTP version from the first client NTP packet
+def ntp_client_version(capture):
+ client_packets = ntp_packets(capture, 3)
+ if len(client_packets) == 0:
+ return None
+ return client_packets[0].version
+
+
+# Filters the packets by type (NTP)
+def ntp_packets(capture, mode=None):
+ packets = []
+ for packet in capture:
+ if NTP in packet:
+ ip = packet.payload
+ udp = ip.payload
+ ntp = udp.payload
+ if mode is None or mode == ntp.mode:
+ packets.append(ntp)
+ return packets
+
+
+def test_ntp_support():
+ capture = rdpcap(cap_pcap_file)
+ if len(capture) > 0:
+ version = ntp_client_version(capture)
+ if version is None:
+ add_summary("No NTP packets received.")
+ return 'skip'
+ if version == 4:
+ add_summary("Using NTPv4.")
+ return 'pass'
+ else:
+ add_summary("Not using NTPv4.")
+ return 'fail'
+ else:
+ add_summary("No NTP packets received.")
+ return 'skip'
+
+
+def add_summary(text):
+ global summary_text
+ summary_text = summary_text + " " + text if summary_text else text
+
+
+write_report("{b}{t}\n{b}".format(b=dash_break_line, t=test_request))
+
+
+if test_request == 'connection.network.ntp_support':
+ write_report("{d}\n{b}".format(b=dash_break_line, d=description_ntp_support))
+ result = test_ntp_support()
+
+write_report("RESULT {r} {t} {s}\n".format(r=result, t=test_request, s=summary_text.strip()))
diff --git a/subset/ntp/test_ntp b/subset/ntp/test_ntp
new file mode 100755
index 0000000000..7521f9d74d
--- /dev/null
+++ b/subset/ntp/test_ntp
@@ -0,0 +1,9 @@
+#!/bin/bash -e
+
+REPORT=/tmp/report.txt
+
+STARTUP=/scans/startup.pcap
+
+python ntp_tests.py connection.network.ntp_support $STARTUP
+
+cat report.txt >> $REPORT
diff --git a/subset/security/Dockerfile.test_ssh b/subset/security/Dockerfile.test_ssh
new file mode 100644
index 0000000000..aa701b5550
--- /dev/null
+++ b/subset/security/Dockerfile.test_ssh
@@ -0,0 +1,7 @@
+FROM daqf/aardvark:latest
+
+RUN $AG update && $AG install nmap
+
+COPY subset/security/test_ssh .
+
+CMD ./test_ssh
diff --git a/subset/security/build.conf b/subset/security/build.conf
index 763d155e46..26876f4343 100644
--- a/subset/security/build.conf
+++ b/subset/security/build.conf
@@ -1,3 +1,4 @@
build subset/security
add tls
add password
+add ssh
diff --git a/subset/security/readme.md b/subset/security/readme.md
index a5f65cf277..2143155dc0 100644
--- a/subset/security/readme.md
+++ b/subset/security/readme.md
@@ -43,3 +43,11 @@ The functional test code is included in the `tlstest/src/main/java` folder.
- pass -> If the device responds to a connection with TLS 1.3 support and provides a valid certificate.
- fail -> If the device responds to a connection with TLS 1.3 support and provides an invalid certificate.
- skip -> If no connection to the device can be established.
+
+## test_ssh
+The SSH test will check that, if a device has an SSH server, it only supports SSHv2
+
+### Conditions for security.ssh.version
+- pass -> If the device runs an SSH server which only supports SSHv2
+- fail -> If the device runs an SSH server which supports SSHv1
+- skip -> If the device does not run an SSH server
\ No newline at end of file
diff --git a/subset/security/ssh_additions.config b/subset/security/ssh_additions.config
deleted file mode 100644
index 7e8895f7c9..0000000000
--- a/subset/security/ssh_additions.config
+++ /dev/null
@@ -1,5 +0,0 @@
-Port 22
-ListenAddress 0.0.0.0
-PermitRootLogin yes
-PasswordAuthentication yes
-KexAlgorithms diffie-hellman-group1-sha1,diffie-hellman-group-exchange-sha1
diff --git a/subset/security/sshfaux/ssh_build.sh b/subset/security/sshfaux/ssh_build.sh
new file mode 100644
index 0000000000..f870555f1d
--- /dev/null
+++ b/subset/security/sshfaux/ssh_build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Build older versions of OpenSSL 1.0.2 and OpenSSH 7.2
+# Used for testing in faux devices only
+#
+# To run SSHD use /usr/local/sbin/sshd
+# SSH components, e.g. ssh-keygen are found in /usr/local/bin
+# SSH configuration and keys found in /usr/local/etc
+
+# Build OpenSSL 1.0.2
+wget https://www.openssl.org/source/openssl-1.0.2g.tar.gz
+tar -xzf openssl-1.0.2g.tar.gz
+cd openssl-1.0.2g
+./config --prefix=/usr/local/openssl --openssldir=/usr/local/openssl
+make -s
+make -s install
+cd ..
+
+# Prepare privilege separation for SSHD
+source ssh_privsep.sh
+
+# Build OpenSSH 7.2
+wget https://mirrors.mit.edu/pub/OpenBSD/OpenSSH/portable/openssh-7.2p1.tar.gz
+tar -xzf openssh-7.2p1.tar.gz
+cd openssh-7.2p1
+./configure --with-ssl-dir=/usr/local/openssl --with-ssh1
+make -s
+make -s install
diff --git a/subset/security/sshfaux/ssh_privsep.sh b/subset/security/sshfaux/ssh_privsep.sh
new file mode 100644
index 0000000000..668d825f9e
--- /dev/null
+++ b/subset/security/sshfaux/ssh_privsep.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+#
+# Prepare environment for running SSHD with privilege separation
+# https://github.com/openssh/openssh-portable/blob/master/README.privsep
+
+mkdir /etc/ssh
+mkdir /var/empty
+chown root:sys /var/empty
+chmod 755 /var/empty
+groupadd sshd
+useradd -g sshd -c 'sshd privsep' -d /var/empty -s /bin/false sshd
diff --git a/subset/security/test_ssh b/subset/security/test_ssh
new file mode 100755
index 0000000000..e1af63282c
--- /dev/null
+++ b/subset/security/test_ssh
@@ -0,0 +1,44 @@
+#!/bin/bash
+#
+# Checks if a device only supports SSHv2
+# Runs NMAP to check if SSH is available
+# Uses the 'sshv1' nmap script to check if the server supports SSHv1
+
+source reporting.sh
+
+TEST_NAME="security.ssh.version"
+TEST_DESCRIPTION="Check that device only support SSHv2"
+REPORT=/tmp/report.txt
+LOG=/tmp/nmap_log.txt
+
+nmap -sV -sC $TARGET_IP > $LOG
+
+nmap_log=$(cat $LOG )
+
+sshv1=$(grep 'sshv1: Server supports SSHv1' $LOG)
+
+if [[ -z "${sshv1}" ]]; then
+ # No SSHv1, but is there an SSHv2 server running?
+ sshv2=$(grep -P '^\d+\/tcp\s+open ssh.*protocol 2.0\)$' $LOG)
+
+ if [[ -z "${sshv2}" ]]; then
+ test_outcome="skip"
+ test_summary="Device is not running an SSH server"
+ else
+ test_outcome="pass"
+ test_summary="Device only supports SSHv2"
+ fi
+
+else
+ test_outcome="fail"
+ test_summary="Device supports SSHv1"
+fi
+
+result_and_summary="RESULT ${test_outcome} ${TEST_NAME} ${test_summary}"
+
+write_out_result $REPORT \
+ "$TEST_NAME" \
+ "$TEST_DESCRIPTION" \
+ "$sshv2" \
+ "$result_and_summary"
+
\ No newline at end of file
diff --git a/testing/run_unit_tests.sh b/testing/run_unit_tests.sh
index b82892e384..a0705015a3 100755
--- a/testing/run_unit_tests.sh
+++ b/testing/run_unit_tests.sh
@@ -10,7 +10,7 @@ source venv/bin/activate
coverage erase
-export PYTHONPATH=$BASEDIR/daq:$BASEDIR/mininet:$BASEDIR/faucet:$BASEDIR/forch:$BASEDIR/bin/python
+export PYTHONPATH=$BASEDIR/daq:$BASEDIR/mininet:$BASEDIR/faucet:$BASEDIR/forch:$BASEDIR/bin/python:$BASEDIR/libs:$BASEDIR/libs/proto
coverage run \
--source $BASEDIR/daq,$BASEDIR/bin/python/ \
-m unittest discover \
diff --git a/testing/test_aux.out b/testing/test_aux.out
index acf12f7aa7..75933922ee 100644
--- a/testing/test_aux.out
+++ b/testing/test_aux.out
@@ -57,7 +57,10 @@ RESULT pass security.passwords.telnet Default passwords have been changed.
RESULT pass security.passwords.ssh Default passwords have been changed.
RESULT skip security.firmware Could not retrieve a firmware version with nmap. Check bacnet port.
RESULT pass security.firmware version found: ?\xFF\xFF\x19,>u\x08\x00no
-dhcp requests 1 1 0 1
+RESULT pass connection.network.ntp_support Using NTPv4.
+RESULT fail connection.network.ntp_support Not using NTPv4.
+RESULT skip connection.network.ntp_support No NTP packets received.
+dhcp requests 1 1 1 1
01: []
02: ['02:macoui:TimeoutError', '02:ping:TimeoutError']
03: []
@@ -91,7 +94,16 @@ port-01 module_config modules
"enabled": false
},
"ipaddr": {
- "timeout_sec": 300
+ "dhcp_ranges": [
+ [
+ "192.168.0.1",
+ "192.168.255.254",
+ 16
+ ]
+ ],
+ "enabled": false,
+ "port_flap_timeout_sec": 20,
+ "timeout_sec": 900
},
"macoui": {
"enabled": true
@@ -105,12 +117,18 @@ port-01 module_config modules
"nmap": {
"enabled": true
},
+ "ntp": {
+ "enabled": true
+ },
"pass": {
"enabled": true
},
"password": {
"enabled": true
},
+ "ssh": {
+ "enabled": false
+ },
"switch": {
"enabled": true,
"poe": {
@@ -139,7 +157,16 @@ port-02 module_config modules
"enabled": true
},
"ipaddr": {
- "timeout_sec": 300
+ "dhcp_ranges": [
+ [
+ "192.168.0.1",
+ "192.168.255.254",
+ 16
+ ]
+ ],
+ "enabled": false,
+ "port_flap_timeout_sec": 20,
+ "timeout_sec": 900
},
"macoui": {
"enabled": true,
@@ -154,12 +181,18 @@ port-02 module_config modules
"nmap": {
"enabled": true
},
+ "ntp": {
+ "enabled": true
+ },
"pass": {
"enabled": false
},
"password": {
"enabled": true
},
+ "ssh": {
+ "enabled": false
+ },
"switch": {
"enabled": true
},
diff --git a/testing/test_aux.sh b/testing/test_aux.sh
index ebf75ee79c..cc97d80a22 100755
--- a/testing/test_aux.sh
+++ b/testing/test_aux.sh
@@ -62,13 +62,13 @@ site_path: inst/test_site
schema_path: schemas/udmi
interfaces:
faux-1:
- opts: brute broadcast_client ntp_pass
+ opts: brute broadcast_client ntpv4
faux-2:
- opts: nobrute expiredtls bacnetfail pubber passwordfail ntp_fail opendns
+ opts: nobrute expiredtls bacnetfail pubber passwordfail ntpv3 opendns ssh
faux-3:
- opts: tls macoui passwordpass bacnet pubber broadcast_client
+ opts: tls macoui passwordpass bacnet pubber broadcast_client ssh
long_dhcp_response_sec: 0
-monitor_scan_sec: 0
+monitor_scan_sec: 20
EOF
if [ -f "$gcp_cred" ]; then
@@ -114,13 +114,14 @@ capture_test_results tls
capture_test_results password
capture_test_results discover
capture_test_results network
+capture_test_results ntp
# Capture peripheral logs
more inst/run-port-*/scans/ip_triggers.txt | cat
dhcp_done=$(fgrep done inst/run-port-01/scans/ip_triggers.txt | wc -l)
dhcp_long=$(fgrep long inst/run-port-01/scans/ip_triggers.txt | wc -l)
echo dhcp requests $((dhcp_done > 1)) $((dhcp_done < 3)) \
- $((dhcp_long > 1)) $((dhcp_long < 4)) | tee -a $TEST_RESULTS
+ $((dhcp_long >= 1)) $((dhcp_long < 4)) | tee -a $TEST_RESULTS
sort inst/result.log | tee -a $TEST_RESULTS
# Show partial logs from each test
diff --git a/testing/test_base.out b/testing/test_base.out
index 86580802e2..7ad42c6cce 100644
--- a/testing/test_base.out
+++ b/testing/test_base.out
@@ -36,15 +36,6 @@ Overall device result PASS
|pass|security.ports.nmap|Other|Other|Only allowed ports found open.|
-## Module ipaddr
-
-
-#### Module Config
-
-|Attribute|Value|
-|---|---|
-|timeout_sec|300|
-
## Module pass
@@ -152,6 +143,8 @@ RESULT pass base.switch.ping target %% 192.0.2.138:2
Switch test with target 192.0.2.138:2
Monolog processing base.switch.ping...
switch ping 2
+%%%%%%%%%%%%%%%%%%%%%% Alt switch tests
+XXX faucet.valve INFO DPID 1 (0x1) pri L2 learned on Port 1 9a:02:57:1e:8f:00 (L2 type 0x0800, L2 dst ff:ff:ff:ff:ff:ff, L3 src X.X.X.X, L3 dst 255.255.255.255) Port 1 VLAN 1002 (1 hosts total)
%%%%%%%%%%%%%%%%%%%%%% Mud profile tests
result open 01: [] 02: [] 03: []
device open 1 1 1
diff --git a/testing/test_base.sh b/testing/test_base.sh
index d39e1c692c..3a4363e93a 100755
--- a/testing/test_base.sh
+++ b/testing/test_base.sh
@@ -52,6 +52,11 @@ cat -vet inst/run-port-02/nodes/ping02/activate.log
count=$(fgrep icmp_seq=5 inst/run-port-02/nodes/ping02/activate.log | wc -l)
echo switch ping $count | tee -a $TEST_RESULTS
+echo %%%%%%%%%%%%%%%%%%%%%% Alt switch tests | tee -a $TEST_RESULTS
+cp config/system/alt.yaml local/system.yaml
+# TODO: Replace this with proper test once VLAN-triggers are added.
+timeout 120s cmd/run -s
+fgrep 'Port 1 9a:02:57:1e:8f:00' inst/faucet.log | redact | tee -a $TEST_RESULTS
echo %%%%%%%%%%%%%%%%%%%%%% Mud profile tests | tee -a $TEST_RESULTS
rm -f local/system.yaml
cp config/system/muddy.conf local/system.conf
diff --git a/testing/test_dhcp.out b/testing/test_dhcp.out
index 6d2dc2e822..31148ca8b6 100644
--- a/testing/test_dhcp.out
+++ b/testing/test_dhcp.out
@@ -1,12 +1,15 @@
Running testing/test_dhcp.sh
DHCP Tests
01: []
-02: ['02:ipaddr:TimeoutError']
+02: ['02:acquire:TimeoutError']
03: []
04: []
+05: []
Device 1 ip triggers: 1 0
Device 2 ip triggers: 0 0
Device 3 long ip triggers: 1
Device 4 ip triggers: 1
-Number of ips: 2
+Device 4 subnet 1 ip: 1 subnet 2 ip: 1 subnet 3 ip: 2
+Device 5 ip triggers: 1
+Device 5 num of ips: 2
Done with tests
diff --git a/testing/test_dhcp.sh b/testing/test_dhcp.sh
index c979de2a98..791b091669 100755
--- a/testing/test_dhcp.sh
+++ b/testing/test_dhcp.sh
@@ -7,11 +7,12 @@ echo DHCP Tests >> $TEST_RESULTS
cat < local/system.conf
source config/system/default.yaml
site_description="Multi-Device Configuration"
-switch_setup.uplink_port=5
+switch_setup.uplink_port=6
interfaces.faux-1.opts=
interfaces.faux-2.opts=xdhcp
interfaces.faux-3.opts=
interfaces.faux-4.opts=
+interfaces.faux-5.opts=
monitor_scan_sec=1
EOF
@@ -29,14 +30,31 @@ cat < local/site/mac_addrs/$intf_mac/module_config.json
}
EOF
+# Multi subnet tests
intf_mac="9a02571e8f04"
mkdir -p local/site/mac_addrs/$intf_mac
cat < local/site/mac_addrs/$intf_mac/module_config.json
{
"modules": {
"ipaddr": {
- "timeout_sec": 320,
- "dhcp_mode": "ip_change"
+ "enabled": true,
+ "port_flap_timeout_sec": 20,
+ "dhcp_ranges": [["192.168.0.1", "192.168.255.254", 16], ["10.255.255.1", "10.255.255.255", 24], ["172.16.0.1", "172.16.0.200", 24]]
+ }
+ }
+}
+EOF
+
+# ip change test
+intf_mac="9a02571e8f05"
+mkdir -p local/site/mac_addrs/$intf_mac
+cat < local/site/mac_addrs/$intf_mac/module_config.json
+{
+ "modules": {
+ "ipaddr": {
+ "enabled": true,
+ "port_flap_timeout_sec": 20,
+ "dhcp_ranges": []
}
}
}
@@ -47,7 +65,7 @@ cmd/run -b -s settle_sec=0 dhcp_lease_time=120s
cat inst/result.log | sort | tee -a $TEST_RESULTS
-for iface in $(seq 1 4); do
+for iface in $(seq 1 5); do
intf_mac=9a:02:57:1e:8f:0$iface
ip_file=inst/run-port-0$iface/scans/ip_triggers.txt
cat $ip_file
@@ -55,11 +73,17 @@ for iface in $(seq 1 4); do
long_triggers=$(fgrep long $ip_file | wc -l)
num_ips=$(cat $ip_file | cut -d ' ' -f 1 | sort | uniq | wc -l)
echo Found $ip_triggers ip triggers and $long_triggers long ip responses.
- if [ $iface == 4 ]; then
- echo "Device $iface ip triggers: $(((ip_triggers + long_triggers) >= 2))" | tee -a $TEST_RESULTS
- echo "Number of ips: $num_ips" | tee -a $TEST_RESULTS
+ if [ $iface == 5 ]; then
+ echo "Device $iface ip triggers: $(((ip_triggers + long_triggers) >= 3))" | tee -a $TEST_RESULTS
+ echo "Device $iface num of ips: $num_ips" | tee -a $TEST_RESULTS
+ elif [ $iface == 4 ]; then
+ echo "Device $iface ip triggers: $(((ip_triggers + long_triggers) >= 4))" | tee -a $TEST_RESULTS
+ subnet_ip=$(fgrep "ip notification 192.168" inst/run-port-*/nodes/ipaddr*/activate.log | wc -l)
+ subnet2_ip=$(fgrep "ip notification 10.255.255" inst/run-port-*/nodes/ipaddr*/activate.log | wc -l)
+ subnet3_ip=$(fgrep "ip notification 172.16.0" inst/run-port-*/nodes/ipaddr*/activate.log | wc -l)
+ echo "Device $iface subnet 1 ip: $subnet_ip subnet 2 ip: $subnet2_ip subnet 3 ip: $subnet3_ip" | tee -a $TEST_RESULTS
elif [ $iface == 3 ]; then
- echo "Device $iface long ip triggers: $((long_triggers > 0))" | tee -a $TEST_RESULTS
+ echo "Device $iface long ip triggers: $((long_triggers > 0))" | tee -a $TEST_RESULTS
else
echo "Device $iface ip triggers: $((ip_triggers > 0)) $((long_triggers > 0))" | tee -a $TEST_RESULTS
fi
diff --git a/testing/test_many.gcp b/testing/test_many.gcp
index b24657de64..eccaaa37a2 100644
--- a/testing/test_many.gcp
+++ b/testing/test_many.gcp
@@ -1,6 +1,2 @@
Running testing/test_many.sh
GCP results diff
-5c5
-< Source: gcp
----
-> Source: local
diff --git a/testing/test_many.out b/testing/test_many.out
index fdef3cc356..29cc0367df 100644
--- a/testing/test_many.out
+++ b/testing/test_many.out
@@ -4,6 +4,9 @@ DAQ stress test
Enough results: 1
Enough DHCP timeouts: 1
Enough static ips: 1
+Enough ipaddr tests: 1
+Enough alternate subnet ips: 1
+Enough ipaddr timeouts: 1
Redacted soak diff
No soak report diff
Done with many
diff --git a/testing/test_many.sh b/testing/test_many.sh
index 2dabc2e0c0..3dd796452b 100755
--- a/testing/test_many.sh
+++ b/testing/test_many.sh
@@ -3,11 +3,16 @@
source testing/test_preamble.sh
# num of devices need to less than 10
-NUM_DEVICES=8
+NUM_DEVICES=9
RUN_LIMIT=20
# num of timeout devices need to be less or equal to num dhcp devices
NUM_NO_DHCP_DEVICES=4
NUM_TIMEOUT_DEVICES=2
+
+# Extended DHCP tests
+NUM_IPADDR_TEST_DEVICES=2
+NUM_IPADDR_TEST_TIMEOUT_DEVICES=1
+
echo Many Tests >> $TEST_RESULTS
echo source config/system/default.yaml > local/system.conf
@@ -15,14 +20,15 @@ echo source config/system/default.yaml > local/system.conf
echo monitor_scan_sec=5 >> local/system.conf
echo switch_setup.uplink_port=$((NUM_DEVICES+1)) >> local/system.conf
echo gcp_cred=$gcp_cred >> local/system.conf
+echo dhcp_lease_time=120s >> local/system.conf
for iface in $(seq 1 $NUM_DEVICES); do
xdhcp=""
+ intf_mac="9a02571e8f0$iface"
+ mkdir -p local/site/mac_addrs/$intf_mac
if [[ $iface -le $NUM_NO_DHCP_DEVICES ]]; then
ip="10.20.0.$((iface+5))"
- intf_mac="9a02571e8f0$iface"
xdhcp="xdhcp=$ip"
- mkdir -p local/site/mac_addrs/$intf_mac
if [[ $iface -gt $NUM_TIMEOUT_DEVICES ]]; then
#Install site specific configs for xdhcp ips
cat < local/site/mac_addrs/$intf_mac/module_config.json
@@ -39,6 +45,31 @@ EOF
}
}
}
+EOF
+ fi
+ elif [[ $iface -le $((NUM_NO_DHCP_DEVICES + NUM_IPADDR_TEST_DEVICES)) ]]; then
+ if [[ $iface -le $((NUM_NO_DHCP_DEVICES + NUM_IPADDR_TEST_TIMEOUT_DEVICES)) ]]; then
+ cat < local/site/mac_addrs/$intf_mac/module_config.json
+ {
+ "modules": {
+ "ipaddr": {
+ "enabled": true,
+ "port_flap_timeout_sec": 20,
+ "timeout_sec": 1
+ }
+ }
+ }
+EOF
+ else
+ cat < local/site/mac_addrs/$intf_mac/module_config.json
+ {
+ "modules": {
+ "ipaddr": {
+ "enabled": true,
+ "port_flap_timeout_sec": 20
+ }
+ }
+ }
EOF
fi
fi
@@ -54,21 +85,30 @@ end_time=`date -u -Isec`
cat inst/result.log
results=$(fgrep [] inst/result.log | wc -l)
timeouts=$(fgrep "ipaddr:TimeoutError" inst/result.log | wc -l)
+ipaddr_timeouts=$(fgrep "ipaddr:TimeoutError" inst/result.log | wc -l)
+ip_notifications=$(fgrep "ip notification" inst/run-port-*/nodes/ipaddr*/activate.log | wc -l)
+alternate_subnet_ip=$(fgrep "ip notification 192.168" inst/run-port-*/nodes/ipaddr*/activate.log | wc -l)
cat inst/run-port-*/scans/ip_triggers.txt
static_ips=$(fgrep nope inst/run-port-*/scans/ip_triggers.txt | wc -l)
more inst/run-port-*/nodes/ping*/activate.log | cat
+more inst/run-port-*/nodes/ipaddr*/activate.log | cat
echo Found $results clean runs, $timeouts timeouts, and $static_ips static_ips.
+echo ipaddr had $ip_notifications notifications and $ipaddr_timeouts timeouts.
# This is broken -- should have many more results available!
-echo Enough results: $((results >= 6*RUN_LIMIT/10)) | tee -a $TEST_RESULTS
+echo Enough results: $((results >= 5*RUN_LIMIT/10)) | tee -a $TEST_RESULTS
# $timeouts should strictly equal $NUM_TIMEOUT_DEVICES when dhcp step is fixed.
echo Enough DHCP timeouts: $((timeouts >= NUM_TIMEOUT_DEVICES)) | tee -a $TEST_RESULTS
echo Enough static ips: $((static_ips >= (NUM_NO_DHCP_DEVICES - NUM_TIMEOUT_DEVICES))) | tee -a $TEST_RESULTS
+echo Enough ipaddr tests: $((ip_notifications >= (NUM_IPADDR_TEST_DEVICES - NUM_IPADDR_TEST_TIMEOUT_DEVICES) * 2 )) | tee -a $TEST_RESULTS
+echo Enough alternate subnet ips: $((alternate_subnet_ip >= (NUM_IPADDR_TEST_DEVICES - NUM_IPADDR_TEST_TIMEOUT_DEVICES) )) | tee -a $TEST_RESULTS
+echo Enough ipaddr timeouts: $((ipaddr_timeouts >= NUM_IPADDR_TEST_TIMEOUT_DEVICES)) | tee -a $TEST_RESULTS
+
echo bin/combine_reports device=9a:02:57:1e:8f:05 from_time=$start_time to_time=$end_time count=2
bin/combine_reports device=9a:02:57:1e:8f:05 from_time=$start_time to_time=$end_time count=2
@@ -86,7 +126,8 @@ if [ -f "$gcp_cred" ]; then
bin/combine_reports device=9a:02:57:1e:8f:05 from_time=$start_time to_time=$end_time \
count=2 from_gcp=true
echo GCP results diff | tee -a $GCP_RESULTS
- diff inst/reports/combo_*.md out/report_local.md | tee -a $GCP_RESULTS
+ # TODO: Re-enable as per b/161529445
+ # diff inst/reports/combo_*.md out/report_local.md | tee -a $GCP_RESULTS
fi
echo Done with many | tee -a $TEST_RESULTS
diff --git a/testing/test_modules.out b/testing/test_modules.out
index ff067a6826..f0c130ddd3 100644
--- a/testing/test_modules.out
+++ b/testing/test_modules.out
@@ -31,4 +31,10 @@ Testing nmap bacnet
RESULT pass security.ports.nmap Only allowed ports found open.
Testing nmap telnet
RESULT fail security.ports.nmap Some disallowed ports are open: 23
+Testing ssh
+RESULT skip security.ssh.version Device is not running an SSH server
+Testing ssh ssh
+RESULT pass security.ssh.version Device only supports SSHv2
+Testing ssh sshv1
+RESULT fail security.ssh.version Device supports SSHv1
Testing complete.
diff --git a/testing/test_modules.sh b/testing/test_modules.sh
index bc123230e1..f703b75c77 100755
--- a/testing/test_modules.sh
+++ b/testing/test_modules.sh
@@ -17,6 +17,9 @@ tls alt expiredtls
nmap
nmap bacnet
nmap telnet
+ssh
+ssh ssh
+ssh sshv1
EOF
DAQ_TARGETS=aardvark,faux1,faux2 bin/docker_build force inline
diff --git a/testing/test_preamble.sh b/testing/test_preamble.sh
index 93ca247d89..d01fc546f4 100644
--- a/testing/test_preamble.sh
+++ b/testing/test_preamble.sh
@@ -56,6 +56,7 @@ function redact {
-e 's/[0-9]{4}-.*T.*Z/XXX/' \
-e 's/[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2} [A-Z]{3}/XXX/' \
-e 's/[a-zA-Z]{3} [a-zA-Z]{3}\s+[0-9]{1,2} [0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2} [0-9]{4}/XXX/' \
+ -e 's/[A-Za-z]{3} [0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/XXX/' \
-e 's/[0-9]{4}-(0|1)[0-9]-(0|1|2|3)[0-9] [0-9]{2}:[0-9]{2}:[0-9]{2}(\+00:00)?/XXX/g' \
-e 's/[0-9]+\.[0-9]{2} seconds/XXX/' \
-e 's/0\.[0-9]+s latency/XXX/' \
diff --git a/topology/alta-dev/faucet.yaml b/topology/alta-dev/faucet.yaml
deleted file mode 100644
index 0dcd804b7e..0000000000
--- a/topology/alta-dev/faucet.yaml
+++ /dev/null
@@ -1,92 +0,0 @@
-dps:
- us-mtv-900-t1sw2-0-1:
- dp_id: 147058200621
- faucet_dp_mac: 0e:00:00:00:01:01
- hardware: GenericTFM
- interfaces:
- 9:
- lldp_beacon: {enable: true}
- lldp_peer_mac: 0e:00:00:00:02:01
- tagged_vlans: [171]
- receive_lldp: true
- 10:
- lldp_beacon: {enable: true}
- lldp_peer_mac: 0e:00:00:00:02:02
- tagged_vlans: [171]
- receive_lldp: true
- 28:
- description: Juniper-Uplink-1
- lacp: 3
- lacp_passthrough: [9, 10]
- lldp_beacon: {enable: true}
- native_vlan: 171
- receive_lldp: true
- lldp_beacon: {max_per_interval: 5, send_interval: 5}
- use_hard_timeout: true
- us-mtv-900-t1sw2-0-2:
- dp_id: 147058200561
- faucet_dp_mac: 0e:00:00:00:01:02
- hardware: GenericTFM
- interfaces:
- 9:
- lldp_beacon: {enable: true}
- lldp_peer_mac: 0e:00:00:00:02:01
- tagged_vlans: [171]
- receive_lldp: true
- 10:
- lldp_beacon: {enable: true}
- lldp_peer_mac: 0e:00:00:00:02:02
- tagged_vlans: [171]
- receive_lldp: true
- 28:
- description: Juniper-Uplink-2
- lacp: 3
- lacp_passthrough: [9, 10]
- lldp_beacon: {enable: true}
- native_vlan: 171
- receive_lldp: true
- lldp_beacon: {max_per_interval: 5, send_interval: 5}
- use_hard_timeout: true
- us-mtv-900-t2sw2-0-1:
- dp_id: 246406200719452
- faucet_dp_mac: 0e:00:00:00:02:01
- hardware: Allied-Telesis
- interface_ranges:
- 1-46: {description: IoT Host, native_vlan: 171}
- interfaces:
- 47:
- lldp_beacon: {enable: true}
- lldp_failover: 48
- loop_protect_external: true
- tagged_vlans: [171]
- receive_lldp: true
- 48:
- lldp_beacon: {enable: true}
- loop_protect_external: true
- tagged_vlans: [171]
- receive_lldp: true
- lldp_beacon: {max_per_interval: 5, send_interval: 5}
- use_hard_timeout: true
- us-mtv-900-t2sw2-0-2:
- dp_id: 246406200719346
- faucet_dp_mac: 0e:00:00:00:02:02
- hardware: Allied-Telesis
- interface_ranges:
- 1-46: {description: IoT Host, native_vlan: 171}
- interfaces:
- 47:
- lldp_beacon: {enable: true}
- loop_protect_external: true
- tagged_vlans: [171]
- receive_lldp: true
- 48:
- lldp_beacon: {enable: true}
- lldp_failover: 47
- loop_protect_external: true
- tagged_vlans: [171]
- receive_lldp: true
- lldp_beacon: {max_per_interval: 5, send_interval: 5}
- use_hard_timeout: true
-version: 2
-vlans:
- 171: {description: BOS-IOT}
diff --git a/topology/alta-dev/gauge.yaml b/topology/alta-dev/gauge.yaml
deleted file mode 100644
index bf0f0e0f1e..0000000000
--- a/topology/alta-dev/gauge.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-dbs:
- prometheus: {prometheus_addr: 0.0.0.0, prometheus_port: 9303, type: prometheus}
-faucet_configs: [/etc/faucet/faucet.yaml]
-watchers:
- flow_table:
- db: prometheus
- dps: [us-mtv-900-t1sw2-0-1, us-mtv-900-t2sw2-0-1, us-mtv-900-t1sw2-0-2, us-mtv-900-t2sw2-0-2]
- interval: 10
- type: flow_table
- port_stats:
- db: prometheus
- dps: [us-mtv-900-t1sw2-0-1, us-mtv-900-t2sw2-0-1, us-mtv-900-t1sw2-0-2, us-mtv-900-t2sw2-0-2]
- interval: 10
- type: port_stats
diff --git a/topology/normalize.sh b/topology/normalize.sh
deleted file mode 100755
index 71fb4da312..0000000000
--- a/topology/normalize.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash -e
-
-ROOT=$(dirname $0)/..
-if [ ! -d "$1" ]; then
- echo $0 [topology dir]
- false
-fi
-
-TDIR=$(realpath $1)
-
-$ROOT/bin/generate_topology raw_topo=$TDIR topo_dir=$TDIR
-
diff --git a/topology/setup.json b/topology/setup.json
deleted file mode 100644
index 7206b7cfdf..0000000000
--- a/topology/setup.json
+++ /dev/null
@@ -1,69 +0,0 @@
-{
- 'faucet_yaml': '/etc/faucet/faucet.yaml',
- 'faucet_dp_mac_format': '0e:00:00:00:%02x:%02x',
- 'lacp_timeout': 5,
- 'default_hardware': 'GenericTFM',
- 'egress_description': 'egress',
- 'combinatorial_port_flood': true,
- 'naming': {
- 'tier1': '-t1sw',
- 'tier2': '-t2sw',
- 'control': '-ctr'
- },
- 'device_description': 'IoT Device',
- 'vlan': {
- 'description': 'Faucet IoT',
- 'name': 'Faucet_IoT'
- },
- 'gauge': {
- 'db_type': 'prometheus',
- 'interval': 10
- },
- 'db_types': {
- 'prometheus': {
- 'prometheus_addr': '0.0.0.0',
- 'prometheus_port': 9303,
- 'type': 'prometheus'
- }
- },
- 'receive_lldp': true,
- 'switch_lldp_beacon': {
- 'max_per_interval': 5,
- 'send_interval': 5
- },
- 'port_lldp_beacon': {
- 'enable': true
- },
- 'loop_protect_external': true,
- "pre_acls": [
- {
- "description": "ICMP Allow",
- "nw_proto": 1
- },
- {
- "description": "ARP Allow",
- "dl_type": "0x0806"
- },
- {
- "description": "DHCP Allow",
- "udp_src": 68,
- "udp_dst": 67
- },
- {
- "description": "DNS Allow",
- "udp_dst": 53
- },
- {
- "description": "DHCP Broadcast",
- "dl_dst": "ff:ff:ff:ff:ff:ff",
- "udp_src": 68,
- "udp_dst": 67
- }
- ],
- "post_acls": [
- {
- "description": "Default Deny",
- "allow": false
- }
- ]
-}
diff --git a/usi/.gitignore b/usi/.gitignore
new file mode 100644
index 0000000000..4b12f8ab84
--- /dev/null
+++ b/usi/.gitignore
@@ -0,0 +1,3 @@
+tmp/*
+target/*
+.idea/*
diff --git a/usi/Dockerfile.usi b/usi/Dockerfile.usi
new file mode 100644
index 0000000000..4fb6601310
--- /dev/null
+++ b/usi/Dockerfile.usi
@@ -0,0 +1,11 @@
+FROM daqf/aardvark:latest
+
+# Do this alone first so it can be re-used by other build files.
+
+RUN $AG update && $AG install openjdk-11-jdk git maven
+
+COPY usi/ usi/
+
+RUN cd usi && mvn clean compile assembly:single
+
+CMD ["./usi/start"]
diff --git a/usi/build.conf b/usi/build.conf
new file mode 100644
index 0000000000..d469dd0503
--- /dev/null
+++ b/usi/build.conf
@@ -0,0 +1,2 @@
+build usi
+add usi
diff --git a/usi/pom.xml b/usi/pom.xml
new file mode 100644
index 0000000000..623acc4d95
--- /dev/null
+++ b/usi/pom.xml
@@ -0,0 +1,146 @@
+
+ 4.0.0
+ com.redstone
+ usi
+ 0.0.1
+jar
+ usi
+
+ UTF-8
+ 1.8
+ 1.8
+
+
+
+
+
+ io.grpc
+ grpc-bom
+ 1.30.0
+ pom
+ import
+
+
+
+
+
+
+ junit
+ junit
+ 4.13
+ test
+
+
+ commons-net
+ commons-net
+ 3.6
+
+
+ io.grpc
+ grpc-netty-shaded
+ 1.30.0
+
+
+ io.grpc
+ grpc-protobuf
+ 1.30.0
+
+
+ io.grpc
+ grpc-stub
+ 1.30.0
+
+
+ org.apache.tomcat
+ annotations-api
+ 6.0.53
+ provided
+
+
+ org.junit.jupiter
+ junit-jupiter
+ 5.6.2
+ compile
+
+
+
+
+
+ kr.motd.maven
+ os-maven-plugin
+ 1.6.2
+
+
+
+
+
+ org.xolstice.maven.plugins
+ protobuf-maven-plugin
+ 0.6.1
+
+ com.google.protobuf:protoc:3.12.0:exe:${os.detected.classifier}
+ grpc-java
+ io.grpc:protoc-gen-grpc-java:1.30.0:exe:${os.detected.classifier}
+
+ ${basedir}/src/main/proto
+
+
+
+
+
+ compile
+ compile-custom
+
+
+
+
+
+ maven-assembly-plugin
+
+
+
+ daq.usi.UsiServer
+
+
+
+ jar-with-dependencies
+
+
+
+
+ maven-clean-plugin
+ 3.1.0
+
+
+
+ maven-resources-plugin
+ 3.1.0
+
+
+ maven-surefire-plugin
+ 2.22.2
+
+
+ maven-jar-plugin
+ 3.2.0
+
+
+ maven-install-plugin
+ 2.5.2
+
+
+ maven-deploy-plugin
+ 2.8.2
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+ 3.8.1
+
+ 9
+ 9
+
+
+
+
+
\ No newline at end of file
diff --git a/usi/src/main/java/daq/usi/BaseSwitchController.java b/usi/src/main/java/daq/usi/BaseSwitchController.java
new file mode 100644
index 0000000000..247930b298
--- /dev/null
+++ b/usi/src/main/java/daq/usi/BaseSwitchController.java
@@ -0,0 +1,169 @@
+package daq.usi;
+
+import java.util.HashMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+
+public abstract class BaseSwitchController implements SwitchController {
+ /**
+ * Terminal Prompt ends with '#' when enabled, '>' when not enabled.
+ */
+ public static final String CONSOLE_PROMPT_ENDING_ENABLED = "#";
+ public static final String CONSOLE_PROMPT_ENDING_LOGIN = ">";
+ public static final int TELNET_PORT = 23;
+
+ // Define Common Variables Required for All Switch Interrogators
+ protected SwitchTelnetClientSocket telnetClientSocket;
+ protected Thread telnetClientSocketThread;
+ protected String remoteIpAddress;
+ protected boolean debug;
+ protected String username;
+ protected String password;
+ protected boolean userAuthorised = false;
+ protected boolean userEnabled = false;
+ protected String hostname = null;
+ protected boolean commandPending = false;
+
+ public BaseSwitchController(String remoteIpAddress, String username,
+ String password) {
+ this(remoteIpAddress, username, password, false);
+ }
+
+ /**
+ * Abstract switch controller. Override this class for a switch-specific implementation.
+ *
+ * @param remoteIpAddress switch ip address
+ * @param username switch username
+ * @param password switch password
+ * @param debug for verbose logging
+ */
+ public BaseSwitchController(
+ String remoteIpAddress, String username, String password, boolean debug) {
+ this.remoteIpAddress = remoteIpAddress;
+ this.username = username;
+ this.password = password;
+ this.debug = debug;
+ telnetClientSocket =
+ new SwitchTelnetClientSocket(remoteIpAddress, TELNET_PORT, this, debug);
+ }
+
+ /**
+ * Map a simple table containing a header and 1 row of data to a hashmap
+ * This method will also attempt to correct for mis-aligned tabular data as well as empty
+ * column values.
+ *
+ * @param rawPacket Raw table response from a switch command
+ * @param colNames Array containing the names of the columns in the response
+ * @param mapNames Array containing the key names to map values to
+ * @return A HashMap containing the values mapped to the key names provided in the mapNames array
+ */
+ protected static HashMap mapSimpleTable(
+ String rawPacket, String[] colNames, String[] mapNames) {
+ HashMap colMap = new HashMap<>();
+ String[] lines = rawPacket.split("\n");
+ if (lines.length > 0) {
+ String header = lines[0].trim();
+ String values = lines[1].trim();
+ int lastSectionEnd = 0;
+ for (int i = 0; i < colNames.length; ++i) {
+ int secStart = lastSectionEnd;
+ int secEnd;
+ if ((i + 1) >= colNames.length) {
+ // Resolving last column
+ secEnd = values.length();
+ } else {
+ // Tabular data is not always reported in perfect alignment; we need to calculate the
+ // correct values based off of the sections in between white spaces
+ int firstWhiteSpace =
+ getFirstWhiteSpace(values.substring(lastSectionEnd)) + lastSectionEnd;
+ int lastWhiteSpace =
+ getIndexOfNonWhitespaceAfterWhitespace(values.substring(firstWhiteSpace))
+ + firstWhiteSpace;
+ int nextHeaderStart = header.indexOf(colNames[i + 1]);
+ secEnd = Math.min(lastWhiteSpace, nextHeaderStart);
+ }
+ lastSectionEnd = secEnd;
+ String sectionRaw = values.substring(secStart, secEnd).trim();
+ colMap.put(mapNames[i], sectionRaw);
+ }
+ }
+ return colMap;
+ }
+
+ private static int getFirstWhiteSpace(String string) {
+ char[] characters = string.toCharArray();
+ for (int i = 0; i < string.length(); i++) {
+ if (Character.isWhitespace(characters[i])) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ private static int getIndexOfNonWhitespaceAfterWhitespace(String string) {
+ char[] characters = string.toCharArray();
+ boolean lastWhitespace = false;
+ for (int i = 0; i < string.length(); i++) {
+ if (Character.isWhitespace(characters[i])) {
+ lastWhitespace = true;
+ } else if (lastWhitespace) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ protected boolean containsPrompt(String consoleData) {
+ // Prompts usually hostname# or hostname(config)#
+ Pattern r = Pattern.compile(hostname + "\\s*(\\(.+\\))?" + CONSOLE_PROMPT_ENDING_ENABLED, 'g');
+ Matcher m = r.matcher(consoleData);
+ return m.find();
+ }
+
+ protected boolean promptReady(String consoleData) {
+ // Prompts usually hostname# or hostname(config)#
+ Pattern r = Pattern.compile(hostname + "\\s*(\\(.+\\))?" + CONSOLE_PROMPT_ENDING_ENABLED + "$");
+ Matcher m = r.matcher(consoleData);
+ return m.find();
+ }
+
+ /**
+ * Receive the raw data packet from the telnet connection and process accordingly.
+ *
+ * @param consoleData Most recent data read from the telnet socket buffer
+ */
+ public void receiveData(String consoleData) {
+ if (debug) {
+ System.out.println(
+ java.time.LocalTime.now() + " receivedData:\t" + consoleData);
+ }
+ if (consoleData != null) {
+ try {
+ consoleData = consoleData.trim();
+ if (!userAuthorised) {
+ handleLoginMessage(consoleData);
+ } else if (!userEnabled) {
+ handleEnableMessage(consoleData);
+ } else {
+ parseData(consoleData);
+ }
+ } catch (Exception e) {
+ telnetClientSocket.disposeConnection();
+ e.printStackTrace();
+ }
+ }
+ }
+
+ protected abstract void parseData(String consoleData) throws Exception;
+
+ protected abstract void handleLoginMessage(String consoleData) throws Exception;
+
+ protected abstract void handleEnableMessage(String consoleData) throws Exception;
+
+ @Override
+ public void start() {
+ telnetClientSocketThread = new Thread(telnetClientSocket);
+ telnetClientSocketThread.start();
+ }
+}
diff --git a/usi/src/main/java/daq/usi/ResponseHandler.java b/usi/src/main/java/daq/usi/ResponseHandler.java
new file mode 100644
index 0000000000..4fd96af577
--- /dev/null
+++ b/usi/src/main/java/daq/usi/ResponseHandler.java
@@ -0,0 +1,5 @@
+package daq.usi;
+
+public interface ResponseHandler {
+ void receiveData(T data) throws Exception;
+}
diff --git a/usi/src/main/java/daq/usi/SwitchController.java b/usi/src/main/java/daq/usi/SwitchController.java
new file mode 100644
index 0000000000..82ae4ce663
--- /dev/null
+++ b/usi/src/main/java/daq/usi/SwitchController.java
@@ -0,0 +1,21 @@
+package daq.usi;
+
+import grpc.InterfaceResponse;
+import grpc.PowerResponse;
+import grpc.SwitchActionResponse;
+
+public interface SwitchController {
+
+ void getPower(int devicePort, ResponseHandler handler) throws Exception;
+
+ void getInterface(int devicePort, ResponseHandler handler)
+ throws Exception;
+
+ void connect(int devicePort, ResponseHandler handler)
+ throws Exception;
+
+ void disconnect(int devicePort, ResponseHandler handler)
+ throws Exception;
+
+ void start();
+}
diff --git a/usi/src/main/java/daq/usi/SwitchTelnetClientSocket.java b/usi/src/main/java/daq/usi/SwitchTelnetClientSocket.java
new file mode 100644
index 0000000000..a8349ff8a1
--- /dev/null
+++ b/usi/src/main/java/daq/usi/SwitchTelnetClientSocket.java
@@ -0,0 +1,312 @@
+package daq.usi;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Queue;
+import org.apache.commons.net.telnet.EchoOptionHandler;
+import org.apache.commons.net.telnet.InvalidTelnetOptionException;
+import org.apache.commons.net.telnet.SuppressGAOptionHandler;
+import org.apache.commons.net.telnet.TelnetClient;
+import org.apache.commons.net.telnet.TelnetNotificationHandler;
+import org.apache.commons.net.telnet.TerminalTypeOptionHandler;
+
+public class SwitchTelnetClientSocket implements TelnetNotificationHandler, Runnable {
+ public static String MORE_INDICATOR = "--More--";
+
+ protected static final int SLEEP_MS = 100;
+ // Rx empty space timeout before sending \n
+ protected static final int MAX_EMPTY_WAIT_COUNT = 70;
+
+ protected TelnetClient telnetClient;
+ protected BaseSwitchController interrogator;
+
+ protected String remoteIpAddress = "";
+ protected int remotePort = 23;
+
+ protected InputStream inputStream;
+ protected OutputStream outputStream;
+
+ protected Queue rxQueue = new LinkedList<>();
+
+ protected Thread readerThread;
+ protected Thread gatherThread;
+
+ protected boolean debug;
+
+ /**
+ * Telnet Client.
+ * @param remoteIpAddress switch ip address
+ * @param remotePort telnet port
+ * @param interrogator switch-specific switch controller
+ * @param debug For more verbose output.
+ */
+ public SwitchTelnetClientSocket(
+ String remoteIpAddress, int remotePort, BaseSwitchController interrogator, boolean debug) {
+ this.remoteIpAddress = remoteIpAddress;
+ this.remotePort = remotePort;
+ this.interrogator = interrogator;
+ this.debug = debug;
+ telnetClient = new TelnetClient();
+ addOptionHandlers();
+ }
+
+ protected void connectTelnetSocket() {
+ int attempts = 0;
+
+ while (!telnetClient.isConnected() && attempts < 10) {
+ try {
+ telnetClient.connect(remoteIpAddress, remotePort);
+ } catch (IOException e) {
+ System.err.println("Exception while connecting:" + e.getMessage());
+ }
+
+ attempts++;
+
+ try {
+ Thread.sleep(SLEEP_MS);
+ } catch (InterruptedException e) {
+ System.err.println("Exception while connecting:" + e.getMessage());
+ }
+ }
+ }
+
+ @Override
+ public void run() {
+ connectTelnetSocket();
+
+ Runnable readDataRunnable =
+ () -> {
+ readData();
+ };
+ readerThread = new Thread(readDataRunnable);
+
+ readerThread.start();
+
+ Runnable gatherDataRunnable =
+ () -> {
+ gatherData();
+ };
+ gatherThread = new Thread(gatherDataRunnable);
+
+ gatherThread.start();
+
+ outputStream = telnetClient.getOutputStream();
+ }
+
+ protected void gatherData() {
+ StringBuilder rxData = new StringBuilder();
+
+ int rxQueueCount = 0;
+
+ while (telnetClient.isConnected()) {
+ try {
+ if (rxQueue.isEmpty()) {
+ Thread.sleep(SLEEP_MS);
+ rxQueueCount++;
+ if (!interrogator.commandPending && rxQueueCount > MAX_EMPTY_WAIT_COUNT) {
+ if (debug) {
+ System.out.println("rxQueue Empty. Sending new line.");
+ }
+ rxQueueCount = 0;
+ writeData("\n");
+ }
+ continue;
+ }
+ rxQueueCount = 0;
+ while (rxQueue.peek().trim() == "") {
+ rxQueue.poll();
+ }
+ String rxTemp = rxQueue.poll();
+ if (rxTemp.indexOf(MORE_INDICATOR) > 0) {
+ writeData("\n");
+ if (debug) {
+ System.out.println("more position:" + rxTemp.indexOf(MORE_INDICATOR));
+ System.out.println("Data: " + rxTemp);
+ }
+ rxTemp = rxTemp.replace(MORE_INDICATOR, "");
+ rxData.append(rxTemp);
+ } else if (interrogator.userAuthorised
+ && !interrogator.promptReady((rxData.toString() + rxTemp).trim())) {
+ rxData.append(rxTemp);
+ if (debug) {
+ System.out.println("Waiting for more data till prompt ready: ");
+ System.out.println(rxData.toString().trim());
+ }
+ } else {
+ rxQueueCount = 0;
+ rxData.append(rxTemp);
+ String rxGathered = rxData.toString().trim();
+ rxData = new StringBuilder();
+ interrogator.receiveData(rxGathered);
+ }
+ } catch (InterruptedException e) {
+ System.err.println("InterruptedException gatherData:" + e.getMessage());
+ }
+ }
+ }
+
+ /**
+ * Callback method called when TelnetClient receives an option negotiation command.
+ *
+ * @param negotiationCode - type of negotiation command received (RECEIVED_DO, RECEIVED_DONT,
+ * RECEIVED_WILL, RECEIVED_WONT, RECEIVED_COMMAND)
+ * @param optionCode - code of the option negotiated
+ */
+ public void receivedNegotiation(int negotiationCode, int optionCode) {
+ String command = null;
+ switch (negotiationCode) {
+ case TelnetNotificationHandler.RECEIVED_DO:
+ command = "DO";
+ break;
+ case TelnetNotificationHandler.RECEIVED_DONT:
+ command = "DONT";
+ break;
+ case TelnetNotificationHandler.RECEIVED_WILL:
+ command = "WILL";
+ break;
+ case TelnetNotificationHandler.RECEIVED_WONT:
+ command = "WONT";
+ break;
+ case TelnetNotificationHandler.RECEIVED_COMMAND:
+ command = "COMMAND";
+ break;
+ default:
+ command = Integer.toString(negotiationCode); // Should not happen
+ break;
+ }
+ System.out.println("Received " + command + " for option code " + optionCode);
+ }
+
+ private void addOptionHandlers() {
+ TerminalTypeOptionHandler terminalTypeOptionHandler =
+ new TerminalTypeOptionHandler("VT100", false, false, true, false);
+
+ EchoOptionHandler echoOptionHandler = new EchoOptionHandler(false, false, false, false);
+
+ SuppressGAOptionHandler suppressGaOptionHandler =
+ new SuppressGAOptionHandler(true, true, true, true);
+
+ try {
+ telnetClient.addOptionHandler(terminalTypeOptionHandler);
+ telnetClient.addOptionHandler(echoOptionHandler);
+ telnetClient.addOptionHandler(suppressGaOptionHandler);
+ } catch (InvalidTelnetOptionException e) {
+ System.err.println(
+ "Error registering option handlers InvalidTelnetOptionException: " + e.getMessage());
+ } catch (IOException e) {
+ System.err.println("Error registering option handlers IOException: " + e.getMessage());
+ }
+ }
+
+ private String normalizeLineEnding(byte[] bytes, char endChar) {
+ List bytesBuffer = new ArrayList();
+
+ int countBreak = 0;
+ int countEsc = 0;
+
+ for (int i = 0; i < bytes.length; i++) {
+ if (bytes[i] != 0) {
+ switch (bytes[i]) {
+ case 8:
+ // backspace \x08
+ break;
+ case 10:
+ // newLineFeed \x0A
+ countBreak++;
+ bytesBuffer.add((byte) endChar);
+ break;
+ case 13:
+ // carriageReturn \x0D
+ countBreak++;
+ bytesBuffer.add((byte) endChar);
+ break;
+ case 27:
+ // escape \x1B
+ countEsc = 2;
+ break;
+ case 33:
+ // character:!
+ break;
+ default:
+ if (countEsc == 0) {
+ if (countBreak > 1) {
+ int size = bytesBuffer.size();
+ for (int x = 0; x < countBreak - 1; x++) {
+ bytesBuffer.remove(size - 1 - x);
+ }
+ countBreak = 0;
+ }
+ bytesBuffer.add(bytes[i]);
+ } else {
+ countEsc--;
+ }
+ break;
+ }
+ }
+ }
+
+ String bytesString = "";
+
+ for (Byte byteBuffer : bytesBuffer) {
+ bytesString = bytesString + (char) (byte) byteBuffer;
+ }
+
+ return bytesString;
+ }
+
+ protected void readData() {
+ int bytesRead = 0;
+
+ inputStream = telnetClient.getInputStream();
+
+ while (telnetClient.isConnected()) {
+ try {
+ byte[] buffer = new byte[1024];
+
+ bytesRead = inputStream.read(buffer);
+ if (bytesRead > 0) {
+ String rawData = normalizeLineEnding(buffer, '\n');
+ rxQueue.add(rawData);
+ // Useful for debugging
+ // rxQueue.add(new String(buffer, 0, bytesRead, StandardCharsets.UTF_8));
+ } else {
+ try {
+ Thread.sleep(SLEEP_MS);
+ } catch (InterruptedException e) {
+ System.err.println("InterruptedException readData:" + e.getMessage());
+ }
+ }
+ } catch (IOException e) {
+ System.err.println("Exception while reading socket:" + e.getMessage());
+ }
+ }
+ }
+
+ public void writeData(String data) {
+ writeOutputStream(data);
+ }
+
+ private void writeOutputStream(String data) {
+ try {
+ outputStream.write(data.getBytes());
+ outputStream.flush();
+ } catch (IOException e) {
+ System.err.println("Exception while writing socket:" + e.getMessage());
+ }
+ }
+
+ /**
+ * Closes telnet connection.
+ */
+ public void disposeConnection() {
+ try {
+ telnetClient.disconnect();
+ } catch (IOException e) {
+ System.err.println("Exception while disposeConnection:" + e.getMessage());
+ }
+ }
+}
diff --git a/usi/src/main/java/daq/usi/UsiImpl.java b/usi/src/main/java/daq/usi/UsiImpl.java
new file mode 100644
index 0000000000..840bfe3e90
--- /dev/null
+++ b/usi/src/main/java/daq/usi/UsiImpl.java
@@ -0,0 +1,110 @@
+package daq.usi;
+
+import daq.usi.allied.AlliedTelesisX230;
+import daq.usi.cisco.Cisco9300;
+import daq.usi.ovs.OpenVSwitch;
+import grpc.InterfaceResponse;
+import grpc.PowerResponse;
+import grpc.SwitchActionResponse;
+import grpc.SwitchInfo;
+import grpc.USIServiceGrpc;
+import io.grpc.stub.StreamObserver;
+import java.util.HashMap;
+import java.util.Map;
+
+public class UsiImpl extends USIServiceGrpc.USIServiceImplBase {
+ private final Map switchControllers;
+
+ public UsiImpl() {
+ super();
+ switchControllers = new HashMap<>();
+ }
+
+ private SwitchController createController(SwitchInfo switchInfo) {
+ SwitchController newController;
+ switch (switchInfo.getModel()) {
+ case ALLIED_TELESIS_X230: {
+ newController = new AlliedTelesisX230(switchInfo.getIpAddr(), switchInfo.getUsername(),
+ switchInfo.getPassword());
+ break;
+ }
+ case CISCO_9300: {
+ newController = new Cisco9300(switchInfo.getIpAddr(), switchInfo.getUsername(),
+ switchInfo.getPassword());
+ break;
+ }
+ case OVS_SWITCH: {
+ newController = new OpenVSwitch();
+ break;
+ }
+ default:
+ throw new IllegalArgumentException("Unrecognized switch model " + switchInfo.getModel());
+ }
+ newController.start();
+ return newController;
+ }
+
+ private SwitchController getSwitchController(SwitchInfo switchInfo) {
+ String repr = String.join(",", switchInfo.getModel().toString(), switchInfo.getIpAddr(),
+ switchInfo.getUsername(),
+ switchInfo.getPassword());
+ return switchControllers.computeIfAbsent(repr, key -> createController(switchInfo));
+ }
+
+ @Override
+ public void getPower(SwitchInfo request, StreamObserver responseObserver) {
+ SwitchController sc = getSwitchController(request);
+ try {
+ sc.getPower(request.getDevicePort(), data -> {
+ responseObserver.onNext(data);
+ responseObserver.onCompleted();
+ });
+ } catch (Exception e) {
+ e.printStackTrace();
+ responseObserver.onError(e);
+ }
+ }
+
+ @Override
+ public void getInterface(SwitchInfo request, StreamObserver responseObserver) {
+ SwitchController sc = getSwitchController(request);
+ try {
+ sc.getInterface(request.getDevicePort(), data -> {
+ responseObserver.onNext(data);
+ responseObserver.onCompleted();
+ });
+ } catch (Exception e) {
+ e.printStackTrace();
+ responseObserver.onError(e);
+ }
+ }
+
+ @Override
+ public void connect(SwitchInfo request, StreamObserver responseObserver) {
+ SwitchController sc = getSwitchController(request);
+ try {
+ sc.connect(request.getDevicePort(), data -> {
+ responseObserver.onNext(data);
+ responseObserver.onCompleted();
+ });
+ } catch (Exception e) {
+ e.printStackTrace();
+ responseObserver.onError(e);
+ }
+ }
+
+ @Override
+ public void disconnect(SwitchInfo request,
+ StreamObserver responseObserver) {
+ SwitchController sc = getSwitchController(request);
+ try {
+ sc.disconnect(request.getDevicePort(), data -> {
+ responseObserver.onNext(data);
+ responseObserver.onCompleted();
+ });
+ } catch (Exception e) {
+ e.printStackTrace();
+ responseObserver.onError(e);
+ }
+ }
+}
diff --git a/usi/src/main/java/daq/usi/UsiServer.java b/usi/src/main/java/daq/usi/UsiServer.java
new file mode 100644
index 0000000000..b5ce26374a
--- /dev/null
+++ b/usi/src/main/java/daq/usi/UsiServer.java
@@ -0,0 +1,59 @@
+package daq.usi;
+
+import io.grpc.Server;
+import io.grpc.ServerBuilder;
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+public class UsiServer {
+ private Server server;
+
+ private void start() throws IOException {
+ /* The port on which the server should run */
+ int port = 5000;
+ server = ServerBuilder.forPort(port)
+ .addService(new UsiImpl())
+ .build()
+ .start();
+ System.out.println("Server started, listening on " + port);
+ Runtime.getRuntime().addShutdownHook(new Thread() {
+ @Override
+ public void run() {
+ // Use stderr here since the logger may have been reset by its JVM shutdown hook.
+ System.err.println("*** shutting down gRPC server since JVM is shutting down");
+ try {
+ UsiServer.this.stop();
+ } catch (InterruptedException e) {
+ e.printStackTrace(System.err);
+ }
+ System.err.println("*** server shut down");
+ }
+ });
+ }
+
+ private void stop() throws InterruptedException {
+ if (server != null) {
+ server.shutdown().awaitTermination(30, TimeUnit.SECONDS);
+ }
+ }
+
+ /**
+ * Await termination on the main thread since the grpc library uses daemon threads.
+ */
+ private void blockUntilShutdown() throws InterruptedException {
+ if (server != null) {
+ server.awaitTermination();
+ }
+ }
+
+ /**
+ * Main method.
+ * @param args not used.
+ * @throws Exception Maybe a refactor is needed to throw more specific exceptions.
+ */
+ public static void main(String[] args) throws Exception {
+ final UsiServer server = new UsiServer();
+ server.start();
+ server.blockUntilShutdown();
+ }
+}
diff --git a/usi/src/main/java/daq/usi/allied/AlliedTelesisX230.java b/usi/src/main/java/daq/usi/allied/AlliedTelesisX230.java
new file mode 100644
index 0000000000..50007a2086
--- /dev/null
+++ b/usi/src/main/java/daq/usi/allied/AlliedTelesisX230.java
@@ -0,0 +1,274 @@
+package daq.usi.allied;
+
+import daq.usi.BaseSwitchController;
+import daq.usi.ResponseHandler;
+import grpc.InterfaceResponse;
+import grpc.LinkStatus;
+import grpc.POEStatus;
+import grpc.POESupport;
+import grpc.PowerResponse;
+import grpc.SwitchActionResponse;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Queue;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+
+public class AlliedTelesisX230 extends BaseSwitchController {
+ private static final String[] powerExpected =
+ {"dev_interface", "admin", "pri", "oper", "power", "device", "dev_class", "max"};
+ private static final String[] showPowerExpected =
+ {"Interface", "Admin", "Pri", "Oper", "Power", "Device", "Class", "Max"};
+ private static final Map poeStatusMap = Map.of("Powered", POEStatus.ON,
+ "Off", POEStatus.OFF, "Fault", POEStatus.FAULT, "Deny", POEStatus.DENY);
+ // TODO Not certain about AT power "Deny" status string. Can't find a device to produce that state
+ private static final Map poeSupportMap = Map.of("Enabled",
+ POESupport.ENABLED, "Disabled", POESupport.DISABLED);
+ private static final Map interfaceProcessMap =
+ Map.of(Pattern.compile("Link is (\\w+)"), "link",
+ Pattern.compile("current duplex (\\w+)"), "duplex",
+ Pattern.compile("current speed (\\w+)"), "speed");
+
+ private static final int WAIT_MS = 100;
+ private ResponseHandler responseHandler;
+
+ /**
+ * ATX230 Switch Controller.
+ *
+ * @param remoteIpAddress switch ip address
+ * @param user switch username
+ * @param password switch password
+ */
+ public AlliedTelesisX230(
+ String remoteIpAddress,
+ String user,
+ String password) {
+ this(remoteIpAddress, user, password, false);
+ }
+
+ /**
+ * ATX230 Switch Controller.
+ *
+ * @param remoteIpAddress switch ip address
+ * @param user switch username
+ * @param password switch password
+ * @param debug for verbose output
+ */
+ public AlliedTelesisX230(
+ String remoteIpAddress,
+ String user,
+ String password, boolean debug) {
+ super(remoteIpAddress, user, password, debug);
+ this.username = user == null ? "manager" : user;
+ this.password = password == null ? "friend" : password;
+ }
+
+ @Override
+ protected void parseData(String consoleData) throws Exception {
+ if (commandPending) {
+ responseHandler.receiveData(consoleData);
+ }
+ }
+
+ /**
+ * Generic ATX230 Switch command to retrieve the Status of an interface.
+ */
+ private String showIfaceStatusCommand(int interfacePort) {
+ return "show interface port1.0." + interfacePort;
+ }
+
+ /**
+   * Generic ATX230 Switch command to retrieve the Power Status of an interface,
+   * built for the given interface port number.
+ */
+ private String showIfacePowerStatusCommand(int interfacePort) {
+ return "show power-inline interface port1.0." + interfacePort;
+ }
+
+ /**
+ * Port toggle commands.
+ *
+ * @param interfacePort port number
+ * @param enabled for bringing up/down interfacePort
+ * @return commands
+ */
+ private String[] portManagementCommand(int interfacePort, boolean enabled) {
+ return new String[] {
+ "configure terminal",
+ "interface port1.0." + interfacePort,
+ (enabled ? "no " : "") + "shutdown",
+ "end"
+ };
+ }
+
+
+ @Override
+ public void getPower(int devicePort, ResponseHandler handler) throws Exception {
+ while (commandPending) {
+ Thread.sleep(WAIT_MS);
+ }
+ String command = showIfacePowerStatusCommand(devicePort);
+ synchronized (this) {
+ commandPending = true;
+ responseHandler = data -> {
+ Map powerMap = processPowerStatusInline(data);
+ handler.receiveData(buildPowerResponse(powerMap));
+ synchronized (this) {
+ commandPending = false;
+ }
+ };
+ telnetClientSocket.writeData(command + "\n");
+ }
+ }
+
+ @Override
+ public void getInterface(int devicePort, ResponseHandler handler)
+ throws Exception {
+ while (commandPending) {
+ Thread.sleep(WAIT_MS);
+ }
+ String command = showIfaceStatusCommand(devicePort);
+ synchronized (this) {
+ commandPending = true;
+ responseHandler = data -> {
+ Map interfaceMap = processInterfaceStatus(data);
+ handler.receiveData(buildInterfaceResponse(interfaceMap));
+ synchronized (this) {
+ commandPending = false;
+ }
+ };
+ telnetClientSocket.writeData(command + "\n");
+ }
+ }
+
+ private void managePort(int devicePort, ResponseHandler handler,
+ boolean enabled) throws Exception {
+ while (commandPending) {
+ Thread.sleep(WAIT_MS);
+ }
+ Queue commands =
+ new LinkedList<>(Arrays.asList(portManagementCommand(devicePort, enabled)));
+ SwitchActionResponse.Builder response = SwitchActionResponse.newBuilder();
+ synchronized (this) {
+ commandPending = true;
+ responseHandler = data -> {
+ if (!commands.isEmpty()) {
+ telnetClientSocket.writeData(commands.poll() + "\n");
+ return;
+ }
+ synchronized (this) {
+ commandPending = false;
+ handler.receiveData(response.setSuccess(true).build());
+ }
+ };
+ telnetClientSocket.writeData(commands.poll() + "\n");
+ }
+ }
+
+ @Override
+ public void connect(int devicePort, ResponseHandler handler)
+ throws Exception {
+ managePort(devicePort, handler, true);
+ }
+
+ @Override
+ public void disconnect(int devicePort, ResponseHandler handler)
+ throws Exception {
+ managePort(devicePort, handler, false);
+ }
+
+ private InterfaceResponse buildInterfaceResponse(Map interfaceMap) {
+ InterfaceResponse.Builder response = InterfaceResponse.newBuilder();
+ String duplex = interfaceMap.getOrDefault("duplex", "");
+ int speed = 0;
+ try {
+ speed = Integer.parseInt(interfaceMap.get("speed"));
+ } catch (NumberFormatException e) {
+ System.out.println("Could not parse int: " + interfaceMap.get("speed"));
+ }
+ String linkStatus = interfaceMap.getOrDefault("link", "");
+ return response.setLinkStatus(linkStatus.equals("UP") ? LinkStatus.UP : LinkStatus.DOWN)
+ .setDuplex(duplex)
+ .setLinkSpeed(speed)
+ .build();
+ }
+
+ private PowerResponse buildPowerResponse(Map powerMap) {
+ PowerResponse.Builder response = PowerResponse.newBuilder();
+ float maxPower = 0;
+ float currentPower = 0;
+ try {
+ maxPower = Float.parseFloat(powerMap.get("max"));
+ currentPower = Float.parseFloat(powerMap.get("power"));
+ } catch (NumberFormatException e) {
+ System.out.println(
+ "Could not parse float: " + powerMap.get("max") + " or " + powerMap.get("power"));
+ }
+ String poeSupport = powerMap.getOrDefault("admin", null);
+ String poeStatus = powerMap.getOrDefault("oper", null);
+ return response.setPoeStatus(poeStatusMap.getOrDefault(poeStatus, POEStatus.OFF))
+ .setPoeSupport(poeSupportMap.getOrDefault(poeSupport, POESupport.DISABLED))
+ .setMaxPowerConsumption(maxPower)
+ .setCurrentPowerConsumption(currentPower).build();
+ }
+
+ private Map processInterfaceStatus(String response) {
+ Map interfaceMap = new HashMap<>();
+ Arrays.stream(response.split("\n")).filter(s -> !containsPrompt(s)).forEach(s -> {
+ for (Pattern pattern : interfaceProcessMap.keySet()) {
+ Matcher m = pattern.matcher(s);
+ if (m.find()) {
+ interfaceMap.put(interfaceProcessMap.get(pattern), m.group(1));
+ }
+ }
+ });
+ return interfaceMap;
+ }
+
+ private Map processPowerStatusInline(String response) {
+ String filtered = Arrays.stream(response.split("\n"))
+ .filter(s -> s.trim().length() > 0
+ && !s.contains("show power-inline")
+ && !containsPrompt(s)
+ && !s.contains("(mW)")) // AT shows mW in second line
+ .collect(Collectors.joining("\n"));
+ return mapSimpleTable(filtered, showPowerExpected, powerExpected);
+ }
+
+ /**
+   * Handles the process when issuing the enable command. Enabling is a required step before commands can
+ * be sent to the switch.
+ *
+   * @param consoleData Raw console data received over the telnet connection.
+ */
+ public void handleEnableMessage(String consoleData) throws Exception {
+ if (containsPrompt(consoleData)) {
+ userEnabled = true;
+ }
+ }
+
+ /**
+ * Handles the process when logging into the switch.
+ *
+   * @param consoleData Raw console data received over the telnet connection.
+ */
+ public void handleLoginMessage(String consoleData) throws Exception {
+ if (consoleData.endsWith("login:")) {
+ telnetClientSocket.writeData(username + "\n");
+ } else if (consoleData.contains("Password:")) {
+ telnetClientSocket.writeData(password + "\n");
+ } else if (consoleData.contains(CONSOLE_PROMPT_ENDING_LOGIN)) {
+ userAuthorised = true;
+ hostname = consoleData.split(CONSOLE_PROMPT_ENDING_LOGIN)[0];
+ telnetClientSocket.writeData("enable\n");
+ } else if (consoleData.contains("Login incorrect")) {
+ telnetClientSocket.disposeConnection();
+ throw new Exception("Failed to Login, Bad Password");
+ }
+ }
+
+}
diff --git a/usi/src/main/java/daq/usi/cisco/Cisco9300.java b/usi/src/main/java/daq/usi/cisco/Cisco9300.java
new file mode 100644
index 0000000000..1dd3683e6c
--- /dev/null
+++ b/usi/src/main/java/daq/usi/cisco/Cisco9300.java
@@ -0,0 +1,273 @@
+package daq.usi.cisco;
+
+import daq.usi.BaseSwitchController;
+import daq.usi.ResponseHandler;
+import grpc.InterfaceResponse;
+import grpc.LinkStatus;
+import grpc.POEStatus;
+import grpc.POESupport;
+import grpc.PowerResponse;
+import grpc.SwitchActionResponse;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Queue;
+import java.util.stream.Collectors;
+
+
+public class Cisco9300 extends BaseSwitchController {
+
+ private static final String[] interfaceExpected =
+ {"interface", "name", "status", "vlan", "duplex", "speed", "type"};
+ private static final String[] showInterfaceExpected =
+ {"Port", "Name", "Status", "Vlan", "Duplex", "Speed", "Type"};
+ private static final Map powerInlineMap = Map.of("Interface", "dev_interface",
+ "Inline Power Mode", "admin",
+ "Operational status", "oper",
+ "Measured at the port", "power",
+ "Device Type", "device",
+ "IEEE Class", "dev_class",
+ "Power available to the device", "max");
+ private static final Map poeStatusMap = Map.of("on", POEStatus.ON,
+ "off", POEStatus.OFF, "fault", POEStatus.FAULT, "power-deny", POEStatus.DENY);
+ private static final Map poeSupportMap = Map.of("auto", POESupport.ENABLED,
+ "off", POESupport.DISABLED);
+ private static final int WAIT_MS = 100;
+ private ResponseHandler responseHandler;
+
+ /**
+ * Cisco 9300 Switch Controller.
+ *
+ * @param remoteIpAddress switch ip
+ * @param user switch username
+ * @param password switch password
+ */
+ public Cisco9300(
+ String remoteIpAddress,
+ String user,
+ String password) {
+ super(remoteIpAddress, user, password);
+ this.username = user == null ? "admin" : user;
+ this.password = password == null ? "password" : password;
+ }
+
+ /**
+ * Generic Cisco Switch command to retrieve the Status of an interface.
+ */
+ private String showIfaceStatusCommand(int interfacePort) {
+ return "show interface gigabitethernet1/0/" + interfacePort + " status";
+ }
+
+ /**
+   * Generic Cisco Switch command to retrieve the Power Status of an interface,
+   * built for the given interface port number.
+ */
+ private String showIfacePowerStatusCommand(int interfacePort) {
+ return "show power inline gigabitethernet1/0/" + interfacePort + " detail";
+ }
+
+ /**
+ * Get port toggle commands.
+ *
+ * @param interfacePort port number
+ * @param enabled for bringing up/down interfacePort
+ * @return commands
+ */
+ private String[] portManagementCommand(int interfacePort, boolean enabled) {
+ return new String[] {
+ "configure terminal",
+ "interface FastEthernet0/" + interfacePort,
+ (enabled ? "no " : "") + "shutdown",
+ "end"
+ };
+ }
+
+ /**
+   * Handles the process when issuing the enable command. Enabling is a required step before commands can
+ * be sent to the switch.
+ *
+   * @param consoleData Raw console data received over the telnet connection.
+ */
+ @Override
+ public void handleEnableMessage(String consoleData) throws Exception {
+ if (consoleData.contains("Password:")) {
+ telnetClientSocket.writeData(password + "\n");
+ } else if (containsPrompt(consoleData)) {
+ userEnabled = true;
+ } else if (consoleData.contains("% Bad passwords")) {
+ telnetClientSocket.disposeConnection();
+ throw new Exception("Could not Enable the User, Bad Password");
+ }
+ }
+
+ /**
+ * Handles the process when logging into the switch.
+ *
+   * @param consoleData Raw console data received over the telnet connection.
+ */
+ @Override
+ public void handleLoginMessage(String consoleData) throws Exception {
+ if (consoleData.contains("Username:")) {
+ telnetClientSocket.writeData(username + "\n");
+ } else if (consoleData.contains("Password:")) {
+ telnetClientSocket.writeData(password + "\n");
+ } else if (consoleData.endsWith(CONSOLE_PROMPT_ENDING_LOGIN)) {
+ userAuthorised = true;
+ hostname = consoleData.split(CONSOLE_PROMPT_ENDING_LOGIN)[0];
+ telnetClientSocket.writeData("enable\n");
+ } else if (consoleData.contains("% Login invalid")) {
+ telnetClientSocket.disposeConnection();
+ throw new Exception("Failed to Login, Login Invalid");
+ } else if (consoleData.contains("% Bad passwords")) {
+ telnetClientSocket.disposeConnection();
+ throw new Exception("Failed to Login, Bad Password");
+ }
+ }
+
+ /**
+ * Handles current data in the buffer read from the telnet console InputStream and sends it to the
+ * appropriate process.
+ *
+ * @param consoleData Current unhandled data in the buffered reader
+ */
+ @Override
+ public void parseData(String consoleData) throws Exception {
+ if (commandPending) {
+ responseHandler.receiveData(consoleData);
+ }
+ }
+
+ @Override
+ public void getPower(int devicePort, ResponseHandler powerResponseHandler)
+ throws Exception {
+ while (commandPending) {
+ Thread.sleep(WAIT_MS);
+ }
+ String command = showIfacePowerStatusCommand(devicePort);
+ synchronized (this) {
+ commandPending = true;
+ responseHandler = data -> {
+ Map powerMap = processPowerStatusInline(data);
+ powerResponseHandler.receiveData(buildPowerResponse(powerMap));
+ synchronized (this) {
+ commandPending = false;
+ }
+ };
+ telnetClientSocket.writeData(command + "\n");
+ }
+ }
+
+ @Override
+ public void getInterface(int devicePort, ResponseHandler handler)
+ throws Exception {
+ while (commandPending) {
+ Thread.sleep(WAIT_MS);
+ }
+ String command = showIfaceStatusCommand(devicePort);
+ synchronized (this) {
+ commandPending = true;
+ responseHandler = data -> {
+ Map interfaceMap = processInterfaceStatus(data);
+ handler.receiveData(buildInterfaceResponse(interfaceMap));
+ synchronized (this) {
+ commandPending = false;
+ }
+ };
+ telnetClientSocket.writeData(command + "\n");
+ }
+ }
+
+ private void managePort(int devicePort, ResponseHandler handler,
+ boolean enabled) throws Exception {
+ while (commandPending) {
+ Thread.sleep(WAIT_MS);
+ }
+ Queue commands =
+ new LinkedList<>(Arrays.asList(portManagementCommand(devicePort, enabled)));
+ SwitchActionResponse.Builder response = SwitchActionResponse.newBuilder();
+ synchronized (this) {
+ commandPending = true;
+ responseHandler = data -> {
+ if (!commands.isEmpty()) {
+ telnetClientSocket.writeData(commands.poll() + "\n");
+ return;
+ }
+ synchronized (this) {
+ commandPending = false;
+ handler.receiveData(response.setSuccess(true).build());
+ }
+ };
+ telnetClientSocket.writeData(commands.poll() + "\n");
+ }
+ }
+
+ @Override
+ public void connect(int devicePort, ResponseHandler handler)
+ throws Exception {
+ managePort(devicePort, handler, true);
+ }
+
+ @Override
+ public void disconnect(int devicePort, ResponseHandler handler)
+ throws Exception {
+ managePort(devicePort, handler, false);
+ }
+
+ private InterfaceResponse buildInterfaceResponse(Map interfaceMap) {
+ InterfaceResponse.Builder response = InterfaceResponse.newBuilder();
+ String duplex = interfaceMap.getOrDefault("duplex", "");
+ if (duplex.startsWith("a-")) { // Interface in Auto Duplex
+ duplex = duplex.replaceFirst("a-", "");
+ }
+
+ String speed = interfaceMap.getOrDefault("speed", "");
+ if (speed.startsWith("a-")) { // Interface in Auto Speed
+ speed = speed.replaceFirst("a-", "");
+ }
+
+ String linkStatus = interfaceMap.getOrDefault("status", "");
+ return response.setLinkStatus(linkStatus.equals("connected") ? LinkStatus.UP : LinkStatus.DOWN)
+ .setDuplex(duplex)
+ .setLinkSpeed(Integer.parseInt(speed))
+ .build();
+ }
+
+ private PowerResponse buildPowerResponse(Map powerMap) {
+ PowerResponse.Builder response = PowerResponse.newBuilder();
+ float maxPower = Float.parseFloat(powerMap.get("max"));
+ float currentPower = Float.parseFloat(powerMap.get("power"));
+
+ String poeSupport = powerMap.getOrDefault("admin", null);
+ String poeStatus = powerMap.getOrDefault("oper", null);
+ return response.setPoeStatus(poeStatusMap.getOrDefault(poeStatus, null))
+ .setPoeSupport(poeSupportMap.getOrDefault(poeSupport, null))
+ .setMaxPowerConsumption(maxPower)
+ .setCurrentPowerConsumption(currentPower).build();
+ }
+
+ private Map processInterfaceStatus(String response) {
+ String filtered = Arrays.stream(response.split("\n"))
+ .filter(s -> !containsPrompt(s))
+ .collect(Collectors.joining("\n"));
+ return mapSimpleTable(filtered, showInterfaceExpected, interfaceExpected);
+ }
+
+ private Map processPowerStatusInline(String response) {
+ Map powerMap = new HashMap<>();
+ Arrays.stream(response.split("\n"))
+ .forEach(
+ line -> {
+ String[] lineParts = line.trim().split(":");
+ if (lineParts.length > 1) {
+ String powerMapKey = powerInlineMap.getOrDefault(lineParts[0], null);
+ if (powerMapKey != null) {
+ powerMap.put(powerMapKey, lineParts[1].trim());
+ }
+ }
+ });
+ return powerMap;
+ }
+
+
+}
diff --git a/usi/src/main/java/daq/usi/ovs/OpenVSwitch.java b/usi/src/main/java/daq/usi/ovs/OpenVSwitch.java
new file mode 100644
index 0000000000..ba38631e7e
--- /dev/null
+++ b/usi/src/main/java/daq/usi/ovs/OpenVSwitch.java
@@ -0,0 +1,87 @@
+package daq.usi.ovs;
+
+import daq.usi.ResponseHandler;
+import daq.usi.SwitchController;
+import grpc.InterfaceResponse;
+import grpc.LinkStatus;
+import grpc.POEStatus;
+import grpc.POESupport;
+import grpc.PowerResponse;
+import grpc.SwitchActionResponse;
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.net.URL;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class OpenVSwitch implements SwitchController {
+
+ private static final String OVS_OUTPUT_FILE = "ovs_output.txt";
+
+ protected String getInterfaceByPort(int devicePort) throws IOException {
+ URL file = OpenVSwitch.class.getClassLoader().getResource(OVS_OUTPUT_FILE);
+ if (file == null) {
+ throw new FileNotFoundException(OVS_OUTPUT_FILE + " is not found!");
+ }
+ FileReader reader = new FileReader(file.getFile());
+ try (BufferedReader bufferedReader = new BufferedReader(reader)) {
+      Pattern pattern = Pattern.compile("(^\\s*" + devicePort + ")(\\((.+)\\))(:.*)");
+ String interfaceLine = bufferedReader.lines().filter(line -> {
+ Matcher m = pattern.matcher(line);
+ return m.find();
+ }).findFirst().get();
+ Matcher m = pattern.matcher(interfaceLine);
+ m.matches();
+ return m.group(3);
+ }
+ }
+
+ @Override
+ public void getPower(int devicePort, ResponseHandler handler) throws Exception {
+ PowerResponse.Builder response = PowerResponse.newBuilder();
+ PowerResponse power = response.setPoeStatus(POEStatus.OFF).setPoeSupport(POESupport.DISABLED)
+ .setMaxPowerConsumption(0).setCurrentPowerConsumption(0).build();
+ handler.receiveData(power);
+ }
+
+ @Override
+ public void getInterface(int devicePort, ResponseHandler handler)
+ throws Exception {
+ InterfaceResponse.Builder response = InterfaceResponse.newBuilder();
+ InterfaceResponse iface =
+ response.setLinkStatus(LinkStatus.UP).setDuplex("").setLinkSpeed(0).build();
+ handler.receiveData(iface);
+ }
+
+ private void managePort(int devicePort, ResponseHandler handler,
+ boolean enabled)
+ throws Exception {
+ String iface = getInterfaceByPort(devicePort);
+ ProcessBuilder processBuilder = new ProcessBuilder();
+ processBuilder.command("bash", "-c", "ifconfig " + iface + (enabled ? " up" : " down"))
+ .inheritIO();
+ Process process = processBuilder.start();
+ boolean exited = process.waitFor(10, TimeUnit.SECONDS);
+ int exitCode = process.exitValue();
+ handler
+ .receiveData(SwitchActionResponse.newBuilder().setSuccess(exited && exitCode == 0).build());
+ }
+
+ @Override
+ public void connect(int devicePort, ResponseHandler handler)
+ throws Exception {
+ managePort(devicePort, handler, true);
+ }
+
+ @Override
+ public void disconnect(int devicePort, ResponseHandler handler)
+ throws Exception {
+ managePort(devicePort, handler, false);
+ }
+
+ public void start() {
+ }
+}
diff --git a/usi/src/main/proto/usi.proto b/usi/src/main/proto/usi.proto
new file mode 100644
index 0000000000..6107b0afc7
--- /dev/null
+++ b/usi/src/main/proto/usi.proto
@@ -0,0 +1,77 @@
+/*
+ * Specification for Universal Switch Interface.
+ */
+syntax = "proto3";
+package usi;
+
+option java_multiple_files = true;
+option java_outer_classname = "USIProto";
+option java_package = "grpc";
+
+service USIService {
+ rpc GetPower(SwitchInfo) returns (PowerResponse) {}
+ rpc GetInterface(SwitchInfo) returns (InterfaceResponse) {}
+ rpc disconnect(SwitchInfo) returns (SwitchActionResponse) {}
+ rpc connect(SwitchInfo) returns (SwitchActionResponse) {}
+}
+
+message SwitchActionResponse {
+ bool success = 1;
+}
+
+message PowerResponse {
+ float current_power_consumption = 1;
+ float max_power_consumption = 2;
+ POESupport poe_support = 3;
+ POEStatus poe_status = 4;
+}
+
+message InterfaceResponse {
+ LinkStatus link_status = 1;
+ int32 link_speed = 2;
+ string duplex = 3;
+}
+
+enum SwitchModel {
+ ALLIED_TELESIS_X230 = 0;
+ CISCO_9300 = 1;
+ OVS_SWITCH = 2;
+}
+
+enum LinkStatus {
+ UP = 0;
+ DOWN = 1;
+}
+
+enum POESupport {
+ ENABLED = 0;
+ DISABLED = 1;
+}
+
+enum POEStatus {
+ ON = 0;
+ OFF = 1;
+ FAULT = 2;
+ DENY = 3;
+}
+
+/*
+ * System configuration of the access switch. This is used by the system
+ * to setup and configure the switch itself.
+ */
+message SwitchInfo {
+ // IP address of external switch.
+ string ip_addr = 1;
+
+ // Device Port
+ int32 device_port = 3;
+
+ // Switch model
+ SwitchModel model = 4;
+
+ // Switch connect username
+ string username = 5;
+
+ // Switch connect password
+ string password = 6;
+}
\ No newline at end of file
diff --git a/usi/src/test/java/daq/usi/BaseSwitchControllerTest.java b/usi/src/test/java/daq/usi/BaseSwitchControllerTest.java
new file mode 100644
index 0000000000..f2867c1b65
--- /dev/null
+++ b/usi/src/test/java/daq/usi/BaseSwitchControllerTest.java
@@ -0,0 +1,47 @@
+package daq.usi;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+
+import java.util.Map;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+class BaseSwitchControllerTest {
+
+ @BeforeEach
+ void setUp() {
+ }
+
+ @AfterEach
+ void tearDown() {
+ }
+
+ @Test
+ void mapSimpleTableEmptyInput() {
+ String raw = "";
+ String[] colNames = {"a", "b"};
+ String[] mapNames = {"a", "b"};
+ Map response = BaseSwitchController.mapSimpleTable(raw, colNames, mapNames);
+ for (String key : response.keySet()) {
+ assertNull(response.get(key));
+ }
+ }
+
+ @Test
+ void mapSimpleTableSampleInputAT() {
+ String raw = "Interface Admin Pri Oper Power Device Class Max \n"
+ + "port1.0.1 Enabled Low Powered 3337 n/a 0 15400 [C]";
+ String[] colNames = {"Interface", "Admin", "Pri", "Oper", "Power", "Device", "Class", "Max"};
+ String[] mapNames = {"interface", "admin", "pri", "oper", "power", "device", "class", "max"};
+ Map expected = Map.of("interface", "port1.0.1", "admin", "Enabled", "pri",
+ "Low", "oper", "Powered", "power", "3337", "device", "n/a",
+ "class", "0", "max", "15400 [C]");
+ Map response = BaseSwitchController.mapSimpleTable(raw, colNames, mapNames);
+ for (String key : response.keySet()) {
+ assertEquals(response.get(key), expected.get(key));
+ }
+ }
+
+}
\ No newline at end of file
diff --git a/usi/src/test/java/daq/usi/ovs/OpenVSwitchTest.java b/usi/src/test/java/daq/usi/ovs/OpenVSwitchTest.java
new file mode 100644
index 0000000000..fb951cf29f
--- /dev/null
+++ b/usi/src/test/java/daq/usi/ovs/OpenVSwitchTest.java
@@ -0,0 +1,29 @@
+package daq.usi.ovs;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import java.io.FileNotFoundException;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+class OpenVSwitchTest {
+ OpenVSwitch ovs;
+
+ @BeforeEach
+ void setUp() {
+ ovs = new OpenVSwitch();
+ }
+
+ @AfterEach
+ void tearDown() {
+ }
+
+ @Test
+ void getInterfaceByPort() throws FileNotFoundException {
+ assertEquals(ovs.getInterfaceByPort(1), "faux");
+ assertEquals(ovs.getInterfaceByPort(2), "faux-2");
+ assertEquals(ovs.getInterfaceByPort(7), "sec-eth7");
+ }
+
+}
\ No newline at end of file
diff --git a/usi/src/test/resources/ovs_output.txt b/usi/src/test/resources/ovs_output.txt
new file mode 100644
index 0000000000..9621772035
--- /dev/null
+++ b/usi/src/test/resources/ovs_output.txt
@@ -0,0 +1,24 @@
+OFPT_FEATURES_REPLY (xid=0x2): dpid:0000000000000002
+n_tables:254, n_buffers:0
+capabilities: FLOW_STATS TABLE_STATS PORT_STATS QUEUE_STATS ARP_MATCH_IP
+actions: output enqueue set_vlan_vid set_vlan_pcp strip_vlan mod_dl_src mod_dl_dst mod_nw_src mod_nw_dst mod_nw_tos mod_tp_src mod_tp_dst
+ 1(faux): addr:de:06:c6:06:73:bb
+ config: 0
+ state: 0
+ current: 10GB-FD COPPER
+ speed: 10000 Mbps now, 0 Mbps max
+ 2(faux-2): addr:de:06:c6:06:73:bc
+ config: 0
+ state: 0
+ current: 10GB-FD COPPER
+ speed: 10000 Mbps now, 0 Mbps max
+ 7(sec-eth7): addr:a2:f2:6f:01:84:d4
+ config: 0
+ state: 0
+ current: 10GB-FD COPPER
+ speed: 10000 Mbps now, 0 Mbps max
+ LOCAL(sec): addr:72:87:94:b5:9c:48
+ config: PORT_DOWN
+ state: LINK_DOWN
+ speed: 0 Mbps now, 0 Mbps max
+OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0
diff --git a/usi/start b/usi/start
new file mode 100755
index 0000000000..c3fc5e5b8a
--- /dev/null
+++ b/usi/start
@@ -0,0 +1,2 @@
+#!/bin/bash -e
+java -cp /ovs:usi/target/usi-0.0.1-jar-with-dependencies.jar daq.usi.UsiServer
diff --git a/validator/.idea/codeStyles/codeStyleConfig.xml b/validator/.idea/codeStyles/codeStyleConfig.xml
index a55e7a179b..b9d18bf599 100644
--- a/validator/.idea/codeStyles/codeStyleConfig.xml
+++ b/validator/.idea/codeStyles/codeStyleConfig.xml
@@ -1,5 +1,5 @@
-
+
\ No newline at end of file
diff --git a/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_annotations_2_10_3.xml b/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_annotations_2_11_0.xml
similarity index 54%
rename from validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_annotations_2_10_3.xml
rename to validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_annotations_2_11_0.xml
index 940abc9cd6..fef9a9403a 100644
--- a/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_annotations_2_10_3.xml
+++ b/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_annotations_2_11_0.xml
@@ -1,11 +1,11 @@
-
+
-
+
-
+
\ No newline at end of file
diff --git a/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_core_2_10_3.xml b/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_core_2_11_0.xml
similarity index 55%
rename from validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_core_2_10_3.xml
rename to validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_core_2_11_0.xml
index c39a1aad89..93709bc78c 100644
--- a/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_core_2_10_3.xml
+++ b/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_core_2_11_0.xml
@@ -1,11 +1,11 @@
-
+
-
+
-
+
\ No newline at end of file
diff --git a/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_databind_2_10_3.xml b/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_databind_2_11_0.xml
similarity index 55%
rename from validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_databind_2_10_3.xml
rename to validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_databind_2_11_0.xml
index 401e4470cc..326959d4e0 100644
--- a/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_databind_2_10_3.xml
+++ b/validator/.idea/libraries/Gradle__com_fasterxml_jackson_core_jackson_databind_2_11_0.xml
@@ -1,11 +1,11 @@
-
+
-
+
-
+
\ No newline at end of file
diff --git a/validator/.idea/libraries/Gradle__com_fasterxml_jackson_dataformat_jackson_dataformat_yaml_2_10_3.xml b/validator/.idea/libraries/Gradle__com_fasterxml_jackson_dataformat_jackson_dataformat_yaml_2_11_0.xml
similarity index 68%
rename from validator/.idea/libraries/Gradle__com_fasterxml_jackson_dataformat_jackson_dataformat_yaml_2_10_3.xml
rename to validator/.idea/libraries/Gradle__com_fasterxml_jackson_dataformat_jackson_dataformat_yaml_2_11_0.xml
index eeaf4be6fe..8b4a7f585e 100644
--- a/validator/.idea/libraries/Gradle__com_fasterxml_jackson_dataformat_jackson_dataformat_yaml_2_10_3.xml
+++ b/validator/.idea/libraries/Gradle__com_fasterxml_jackson_dataformat_jackson_dataformat_yaml_2_11_0.xml
@@ -1,11 +1,11 @@
-
+
-
+
-
+
\ No newline at end of file
diff --git a/validator/.idea/libraries/Gradle__org_yaml_snakeyaml_1_24.xml b/validator/.idea/libraries/Gradle__org_yaml_snakeyaml_1_26.xml
similarity index 50%
rename from validator/.idea/libraries/Gradle__org_yaml_snakeyaml_1_24.xml
rename to validator/.idea/libraries/Gradle__org_yaml_snakeyaml_1_26.xml
index 6d98003d93..734cd9dad1 100644
--- a/validator/.idea/libraries/Gradle__org_yaml_snakeyaml_1_24.xml
+++ b/validator/.idea/libraries/Gradle__org_yaml_snakeyaml_1_26.xml
@@ -1,11 +1,11 @@
-
+
-
+
-
+
\ No newline at end of file
diff --git a/validator/.idea/modules/daq-validator.validator.iml b/validator/.idea/modules/daq-validator.validator.iml
index c07f3da9ef..f12ef8c574 100644
--- a/validator/.idea/modules/daq-validator.validator.iml
+++ b/validator/.idea/modules/daq-validator.validator.iml
@@ -12,8 +12,8 @@
-
-
+
+
@@ -46,9 +46,9 @@
-
-
-
+
+
+
diff --git a/validator/bin/registrar b/validator/bin/registrar
index c302518d21..49dc18d1e3 100755
--- a/validator/bin/registrar
+++ b/validator/bin/registrar
@@ -16,6 +16,9 @@ devices_dir=$2
schema_dir=$3
device_filter=$4
+echo Using gcloud auth:
+gcloud config get-value account || true
+
echo Using cloud project $project_id
echo Using site config dir $devices_dir
echo Using schema root dir $schema_dir
@@ -24,6 +27,7 @@ echo Using device filter $device_filter
JAVA=/usr/lib/jvm/java-11-openjdk-amd64/bin/java
error=0
+echo java args $project_id $devices_dir $schema_dir $device_filter
$JAVA -cp $jarfile $mainclass $project_id $devices_dir $schema_dir $device_filter || error=$?
echo Registrar complete, exit $error
diff --git a/validator/bin/test_schema b/validator/bin/test_schema
index c73505aa2e..f3adbf1657 100755
--- a/validator/bin/test_schema
+++ b/validator/bin/test_schema
@@ -59,7 +59,7 @@ for schema in $schemas; do
error=0
reltest=${testpath#$rootdir/}
- (cd $rootdir; java -jar $jarfile $schemaname $reltest $ignoreset) 2> $output || error=$?
+ (cd $rootdir; java -jar $jarfile $schemaname $reltest $ignoreset --) 2> $output || error=$?
if [ $force == y ]; then
diff $expected $output || echo Updating $expected && cp $output $expected
else
diff --git a/validator/bin/validate b/validator/bin/validate
index 910021f982..e79c158756 100755
--- a/validator/bin/validate
+++ b/validator/bin/validate
@@ -20,6 +20,9 @@ if [ ! -f $jarfile ]; then
validator/bin/build
fi
+echo Using gcloud auth:
+gcloud config get-value account || true
+
echo Executing validator $schema $target...
echo Validating against schema $schemafile into validations/
diff --git a/validator/src/main/java/com/google/daq/mqtt/registrar/LocalDevice.java b/validator/src/main/java/com/google/daq/mqtt/registrar/LocalDevice.java
index f36e338b85..e32dc8b2b0 100644
--- a/validator/src/main/java/com/google/daq/mqtt/registrar/LocalDevice.java
+++ b/validator/src/main/java/com/google/daq/mqtt/registrar/LocalDevice.java
@@ -51,7 +51,6 @@ class LocalDevice {
private static final String RSA_CERT_PEM = "rsa_cert.pem";
private static final String RSA_PRIVATE_PEM = "rsa_private.pem";
private static final String RSA_PRIVATE_PKCS8 = "rsa_private.pkcs8";
- private static final String PHYSICAL_TAG_ERROR = "Physical tag %s %s does not match expected %s";
private static final Set<String> DEVICE_FILES = ImmutableSet.of(METADATA_JSON);
private static final Set<String> KEY_FILES = ImmutableSet.of(RSA_PUBLIC_PEM, RSA_PRIVATE_PEM, RSA_PRIVATE_PKCS8);
@@ -68,18 +67,17 @@ class LocalDevice {
private final Map<String, Schema> schemas;
private final File deviceDir;
private final UdmiSchema.Metadata metadata;
- private final File devicesDir;
private final ExceptionMap exceptionMap;
private String deviceNumId;
private CloudDeviceSettings settings;
+ private DeviceCredential deviceCredential;
LocalDevice(File devicesDir, String deviceId, Map<String, Schema> schemas) {
try {
this.deviceId = deviceId;
this.schemas = schemas;
- this.devicesDir = devicesDir;
exceptionMap = new ExceptionMap("Exceptions for " + deviceId);
deviceDir = new File(devicesDir, deviceId);
metadata = readMetadata();
@@ -162,16 +160,21 @@ private String getAuthFileType() {
return RSA_CERT_TYPE.equals(getAuthType()) ? RSA_CERT_FILE : RSA_KEY_FILE;
}
- private DeviceCredential loadCredential() {
+ public DeviceCredential loadCredential() {
+ deviceCredential = readCredential();
+ return deviceCredential;
+ }
+
+ public DeviceCredential readCredential() {
try {
if (hasGateway() && getAuthType() != null) {
- throw new RuntimeException("Proxied devices should not have auth_type defined");
+ throw new RuntimeException("Proxied devices should not have cloud.auth_type defined");
}
if (!isDirectConnect()) {
return null;
}
if (getAuthType() == null) {
- throw new RuntimeException("Credential auth_type definition missing");
+ throw new RuntimeException("Credential cloud.auth_type definition missing");
}
File deviceKeyFile = new File(deviceDir, publicKeyFile());
if (!deviceKeyFile.exists()) {
@@ -223,10 +226,6 @@ boolean isDirectConnect() {
return isGateway() || !hasGateway();
}
- String getGatewayId() {
- return hasGateway() ? metadata.gateway.gateway_id : null;
- }
-
CloudDeviceSettings getSettings() {
try {
if (settings != null) {
@@ -236,7 +235,7 @@ CloudDeviceSettings getSettings() {
if (metadata == null) {
return settings;
}
- settings.credential = loadCredential();
+ settings.credential = deviceCredential;
settings.metadata = metadataString();
settings.config = deviceConfigString();
settings.proxyDevices = getProxyDevicesList();
@@ -297,6 +296,7 @@ private String metadataString() {
}
public void validateEnvelope(String registryId, String siteName) {
+ checkConsistency(siteName);
try {
UdmiSchema.Envelope envelope = new UdmiSchema.Envelope();
envelope.deviceId = deviceId;
@@ -309,7 +309,6 @@ public void validateEnvelope(String registryId, String siteName) {
} catch (Exception e) {
throw new IllegalStateException("Validating envelope " + deviceId, e);
}
- checkConsistency(siteName);
}
private String fakeProjectId() {
@@ -317,15 +316,17 @@ private String fakeProjectId() {
}
private void checkConsistency(String expectedSite) {
- String siteName = metadata.system.location.site;
- String assetSite = metadata.system.physical_tag.asset.site;
String assetName = metadata.system.physical_tag.asset.name;
- Preconditions.checkState(expectedSite.equals(siteName),
- String.format(PHYSICAL_TAG_ERROR, "location", siteName, expectedSite));
- Preconditions.checkState(expectedSite.equals(assetSite),
- String.format(PHYSICAL_TAG_ERROR, "site", assetSite, expectedSite));
Preconditions.checkState(deviceId.equals(assetName),
- String.format(PHYSICAL_TAG_ERROR, "name", assetName, deviceId));
+ String.format("system.physical_tag.asset.name %s does not match expected %s", assetName, deviceId));
+
+ String assetSite = metadata.system.physical_tag.asset.site;
+ Preconditions.checkState(expectedSite.equals(assetSite),
+ String.format("system.physical_tag.asset.site %s does not match expected %s", assetSite, expectedSite));
+
+ String siteName = metadata.system.location.site;
+ Preconditions.checkState(expectedSite.equals(siteName),
+ String.format("system.location.site %s does not match expected %s", siteName, expectedSite));
}
private String makeNumId(UdmiSchema.Envelope envelope) {
@@ -335,11 +336,12 @@ private String makeNumId(UdmiSchema.Envelope envelope) {
public void writeErrors() {
File errorsFile = new File(deviceDir, DEVICE_ERRORS_JSON);
- System.err.println("Updating " + errorsFile);
if (exceptionMap.isEmpty()) {
+ System.err.println("Removing " + errorsFile);
errorsFile.delete();
return;
}
+ System.err.println("Updating " + errorsFile);
try (PrintStream printStream = new PrintStream(new FileOutputStream(errorsFile))) {
ExceptionMap.ErrorTree errorTree = ExceptionMap.format(exceptionMap, ERROR_FORMAT_INDENT);
errorTree.write(printStream);
@@ -377,8 +379,9 @@ void writeNormalized() {
public void writeConfigFile() {
File configFile = new File(deviceDir, GENERATED_CONFIG_JSON);
try (OutputStream outputStream = new FileOutputStream(configFile)) {
- outputStream.write(settings.config.getBytes());
+ outputStream.write(getSettings().config.getBytes());
} catch (Exception e) {
+ e.printStackTrace();
throw new RuntimeException("While writing "+ configFile.getAbsolutePath(), e);
}
}
@@ -399,7 +402,7 @@ public ExceptionMap getErrors() {
return exceptionMap;
}
- public boolean hasValidMetadata() {
+ public boolean isValid() {
return metadata != null;
}
diff --git a/validator/src/main/java/com/google/daq/mqtt/registrar/Registrar.java b/validator/src/main/java/com/google/daq/mqtt/registrar/Registrar.java
index 6b991bca48..e665d9ed16 100644
--- a/validator/src/main/java/com/google/daq/mqtt/registrar/Registrar.java
+++ b/validator/src/main/java/com/google/daq/mqtt/registrar/Registrar.java
@@ -1,5 +1,7 @@
package com.google.daq.mqtt.registrar;
+import static java.util.stream.Collectors.toSet;
+
import com.fasterxml.jackson.annotation.JsonInclude.Include;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
@@ -7,24 +9,30 @@
import com.google.api.services.cloudiot.v1.model.Device;
import com.google.api.services.cloudiot.v1.model.DeviceCredential;
import com.google.common.base.Preconditions;
-import com.google.daq.mqtt.util.*;
+import com.google.common.collect.ImmutableList;
+import com.google.daq.mqtt.util.CloudDeviceSettings;
+import com.google.daq.mqtt.util.CloudIotManager;
+import com.google.daq.mqtt.util.ConfigUtil;
+import com.google.daq.mqtt.util.ExceptionMap;
import com.google.daq.mqtt.util.ExceptionMap.ErrorTree;
-import org.everit.json.schema.Schema;
-import org.everit.json.schema.loader.SchemaClient;
-import org.everit.json.schema.loader.SchemaLoader;
-import org.json.JSONObject;
-import org.json.JSONTokener;
-
+import com.google.daq.mqtt.util.PubSubPusher;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.math.BigInteger;
-import java.util.*;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
-
-import static java.util.stream.Collectors.toSet;
+import org.everit.json.schema.Schema;
+import org.everit.json.schema.loader.SchemaClient;
+import org.everit.json.schema.loader.SchemaLoader;
+import org.json.JSONObject;
+import org.json.JSONTokener;
public class Registrar {
@@ -43,6 +51,7 @@ public class Registrar {
.setDateFormat(new ISO8601DateFormat())
.setSerializationInclusion(Include.NON_NULL);
public static final String ALL_MATCH = "";
+ private static final String LOCAL_ONLY_PROJECT_ID = "--";
private CloudIotManager cloudIotManager;
private File siteConfig;
@@ -90,7 +99,7 @@ private void writeErrors() throws Exception {
.put(device.getDeviceId(), "True");
}
});
- if (!blockErrors.isEmpty()) {
+ if (blockErrors != null && !blockErrors.isEmpty()) {
errorSummary.put("Block", blockErrors.stream().collect(Collectors.toMap(
Map.Entry::getKey, entry -> entry.getValue().toString())));
}
@@ -121,27 +130,31 @@ private void processDevices(String deviceRegex) {
Set<String> extraDevices = cloudDevices.stream().map(Device::getId).collect(toSet());
for (String localName : localDevices.keySet()) {
LocalDevice localDevice = localDevices.get(localName);
- if (!localDevice.hasValidMetadata()) {
+ if (!localDevice.isValid()) {
System.err.println("Skipping (invalid) " + localName);
continue;
}
extraDevices.remove(localName);
try {
- updateCloudIoT(localDevice);
localDevice.writeConfigFile();
- Device device = Preconditions.checkNotNull(fetchDevice(localName),
- "missing device " + localName);
- BigInteger numId = Preconditions.checkNotNull(device.getNumId(),
- "missing deviceNumId for " + localName);
- localDevice.setDeviceNumId(numId.toString());
- sendMetadataMessage(localDevice);
+ if (!localOnly()) {
+ updateCloudIoT(localDevice);
+ Device device = Preconditions.checkNotNull(fetchDevice(localName),
+ "missing device " + localName);
+ BigInteger numId = Preconditions.checkNotNull(device.getNumId(),
+ "missing deviceNumId for " + localName);
+ localDevice.setDeviceNumId(numId.toString());
+ sendMetadataMessage(localDevice);
+ }
} catch (Exception e) {
System.err.println("Deferring exception: " + e.toString());
localDevice.getErrors().put("Registering", e);
}
}
- bindGatewayDevices(localDevices);
- blockErrors = blockExtraDevices(extraDevices);
+ if (!localOnly()) {
+ bindGatewayDevices(localDevices);
+ blockErrors = blockExtraDevices(extraDevices);
+ }
System.err.println(String.format("Processed %d devices", localDevices.size()));
} catch (Exception e) {
throw new RuntimeException("While processing devices", e);
@@ -209,8 +222,17 @@ private void shutdown() {
}
private List<Device> fetchDeviceList(Pattern devicePattern) {
- System.err.println("Fetching remote registry " + cloudIotManager.getRegistryId());
- return cloudIotManager.fetchDeviceList(devicePattern);
+ if (localOnly()) {
+ System.err.println("Skipping remote registry fetch");
+ return ImmutableList.of();
+ } else {
+ System.err.println("Fetching remote registry " + cloudIotManager.getRegistryPath());
+ return cloudIotManager.fetchDeviceList(devicePattern);
+ }
+ }
+
+ private boolean localOnly() {
+ return LOCAL_ONLY_PROJECT_ID.equals(projectId);
}
private Map<String, LocalDevice> loadLocalDevices(Pattern devicePattern) {
@@ -267,8 +289,13 @@ private Map loadDevices(File devicesDir, String[] devices,
Matcher deviceMatch = devicePattern.matcher(deviceName);
if (deviceMatch.find() && LocalDevice.deviceExists(devicesDir, deviceName)) {
System.err.println("Loading local device " + deviceName);
- LocalDevice localDevice = new LocalDevice(devicesDir, deviceName, schemas);
- localDevices.put(deviceName, localDevice);
+ LocalDevice localDevice = localDevices.computeIfAbsent(deviceName,
+ keyName -> new LocalDevice(devicesDir, deviceName, schemas));
+ try {
+ localDevice.loadCredential();
+ } catch (Exception e) {
+ localDevice.getErrors().put("Credential", e);
+ }
try {
localDevice.validateEnvelope(cloudIotManager.getRegistryId(), cloudIotManager.getSiteName());
} catch (Exception e) {
diff --git a/validator/src/main/java/com/google/daq/mqtt/util/CloudIotManager.java b/validator/src/main/java/com/google/daq/mqtt/util/CloudIotManager.java
index f69c3bb0ae..1fec58ed0a 100644
--- a/validator/src/main/java/com/google/daq/mqtt/util/CloudIotManager.java
+++ b/validator/src/main/java/com/google/daq/mqtt/util/CloudIotManager.java
@@ -68,12 +68,12 @@ private static CloudIotConfig validate(CloudIotConfig cloudIotConfig) {
return cloudIotConfig;
}
- private String getRegistryPath(String registryId) {
+ public String getRegistryPath() {
return projectPath + "/registries/" + registryId;
}
private String getDevicePath(String registryId, String deviceId) {
- return getRegistryPath(registryId) + "/devices/" + deviceId;
+ return getRegistryPath() + "/devices/" + deviceId;
}
private void initializeCloudIoT() {
@@ -168,7 +168,7 @@ private GatewayConfig getGatewayConfig(CloudDeviceSettings settings) {
private void createDevice(String deviceId, CloudDeviceSettings settings) throws IOException {
try {
- cloudIotRegistries.devices().create(getRegistryPath(registryId),
+ cloudIotRegistries.devices().create(getRegistryPath(),
makeDevice(deviceId, settings, null)).execute();
} catch (GoogleJsonResponseException e) {
throw new RuntimeException("Remote error creating device " + deviceId, e);
@@ -205,7 +205,7 @@ public List fetchDeviceList(Pattern devicePattern) {
try {
List<Device> devices = cloudIotRegistries
.devices()
- .list(getRegistryPath(registryId))
+ .list(getRegistryPath())
.setPageSize(LIST_PAGE_SIZE)
.execute()
.getDevices();
@@ -254,7 +254,7 @@ public Object getCloudRegion() {
}
public void bindDevice(String proxyDeviceId, String gatewayDeviceId) throws IOException {
- cloudIotRegistries.bindDeviceToGateway(getRegistryPath(registryId),
+ cloudIotRegistries.bindDeviceToGateway(getRegistryPath(),
getBindRequest(proxyDeviceId, gatewayDeviceId)).execute();
}
diff --git a/validator/src/main/java/com/google/daq/mqtt/util/FirestoreDataSink.java b/validator/src/main/java/com/google/daq/mqtt/util/FirestoreDataSink.java
index 9a09a0cc9d..ac0d527f2a 100644
--- a/validator/src/main/java/com/google/daq/mqtt/util/FirestoreDataSink.java
+++ b/validator/src/main/java/com/google/daq/mqtt/util/FirestoreDataSink.java
@@ -1,6 +1,5 @@
package com.google.daq.mqtt.util;
-import com.google.auth.Credentials;
import com.google.auth.oauth2.GoogleCredentials;
import com.google.cloud.ServiceOptions;
import com.google.cloud.firestore.DocumentReference;
@@ -8,9 +7,6 @@
import com.google.cloud.firestore.FirestoreOptions;
import com.google.common.base.Preconditions;
import com.google.daq.mqtt.util.ExceptionMap.ErrorTree;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
@@ -19,8 +15,6 @@
public class FirestoreDataSink {
- private static final String
- CREDENTIAL_ERROR_FORMAT = "Credential file %s defined by %s not found.";
private static final String
VIEW_URL_FORMAT = "https://console.cloud.google.com/firestore/data/registries/?project=%s";
@@ -34,10 +28,10 @@ public class FirestoreDataSink {
public FirestoreDataSink() {
try {
- Credentials projectCredentials = getProjectCredentials();
+ GoogleCredentials credential = GoogleCredentials.getApplicationDefault();
FirestoreOptions firestoreOptions =
FirestoreOptions.getDefaultInstance().toBuilder()
- .setCredentials(projectCredentials)
+ .setCredentials(credential)
.setProjectId(projectId)
.setTimestampsInSnapshotsEnabled(true)
.build();
@@ -48,20 +42,8 @@ public FirestoreDataSink() {
}
}
- private Credentials getProjectCredentials() throws IOException {
- File credentialFile = new File(System.getenv(ServiceOptions.CREDENTIAL_ENV_NAME));
- if (!credentialFile.exists()) {
- throw new RuntimeException(String.format(CREDENTIAL_ERROR_FORMAT,
- credentialFile.getAbsolutePath(), ServiceOptions.CREDENTIAL_ENV_NAME));
- }
- try (FileInputStream serviceAccount = new FileInputStream(credentialFile)) {
- return GoogleCredentials.fromStream(serviceAccount);
- }
- }
-
public void validationResult(String deviceId, String schemaId, Map<String, String> attributes,
- Object message,
- ErrorTree errorTree) {
+ Object message, ErrorTree errorTree) {
if (oldError.get() != null) {
throw oldError.getAndSet(null);
}
diff --git a/validator/src/main/java/com/google/daq/mqtt/validator/Validator.java b/validator/src/main/java/com/google/daq/mqtt/validator/Validator.java
index 5fc94ed99c..ca133f2769 100644
--- a/validator/src/main/java/com/google/daq/mqtt/validator/Validator.java
+++ b/validator/src/main/java/com/google/daq/mqtt/validator/Validator.java
@@ -56,6 +56,7 @@ public class Validator {
private static final String DEVICE_REGISTRY_ID_KEY = "deviceRegistryId";
private static final String UNKNOWN_SCHEMA_DEFAULT = "unknown";
private static final String POINTSET_TYPE = "pointset";
+ private static final String NO_SITE = "--";
private FirestoreDataSink dataSink;
private File schemaRoot;
private String schemaSpec;
@@ -69,15 +70,14 @@ public class Validator {
public static void main(String[] args) {
Validator validator = new Validator();
try {
- System.out.println(ServiceOptions.CREDENTIAL_ENV_NAME + "=" +
- System.getenv(ServiceOptions.CREDENTIAL_ENV_NAME));
- if (args.length < 3 || args.length > 4) {
- throw new IllegalArgumentException("Args: schema target inst_name [site]");
+ if (args.length != 4) {
+ throw new IllegalArgumentException("Args: [schema] [target] [inst_name] [site]");
}
validator.setSchemaSpec(args[0]);
String targetSpec = args[1];
String instName = args[2];
- if (args.length >= 4) {
+ String siteDir = args[3];
+ if (!NO_SITE.equals(siteDir)) {
validator.setSiteDir(args[3]);
}
if (targetSpec.startsWith(PUBSUB_PREFIX)) {