From 9ce6550388a97cd5c564c5b7f4387d4c3b9e23d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Sun, 4 Nov 2018 22:01:00 +0100 Subject: [PATCH 01/54] new controller with bandwidth calculation --- bachelor_controller/LICENSE | 9 + bachelor_controller/common.py | 28 ++ bachelor_controller/config_local.yaml | 29 ++ bachelor_controller/config_local_macosx.yaml | 33 +++ bachelor_controller/config_master.yaml | 28 ++ bachelor_controller/config_slave.yaml | 25 ++ bachelor_controller/config_slave2.yaml | 25 ++ bachelor_controller/my_control_app.py | 263 +++++++++++++++++++ bachelor_controller/my_filter.py | 53 ++++ bachelor_controller/my_local_control_app.py | 152 +++++++++++ bachelor_controller/readme.txt | 11 + 11 files changed, 656 insertions(+) create mode 100644 bachelor_controller/LICENSE create mode 100755 bachelor_controller/common.py create mode 100644 bachelor_controller/config_local.yaml create mode 100644 bachelor_controller/config_local_macosx.yaml create mode 100644 bachelor_controller/config_master.yaml create mode 100644 bachelor_controller/config_slave.yaml create mode 100644 bachelor_controller/config_slave2.yaml create mode 100755 bachelor_controller/my_control_app.py create mode 100755 bachelor_controller/my_filter.py create mode 100755 bachelor_controller/my_local_control_app.py create mode 100644 bachelor_controller/readme.txt diff --git a/bachelor_controller/LICENSE b/bachelor_controller/LICENSE new file mode 100644 index 0000000..8ac59fd --- /dev/null +++ b/bachelor_controller/LICENSE @@ -0,0 +1,9 @@ +The MIT License (MIT) + +Copyright (c) 2016 Technische Universität Berlin + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom 
the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/bachelor_controller/common.py b/bachelor_controller/common.py new file mode 100755 index 0000000..370d25f --- /dev/null +++ b/bachelor_controller/common.py @@ -0,0 +1,28 @@ +from uniflex.core import events + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class AveragedSpectrumScanSampleEvent(events.EventBase): + def __init__(self, avg): + super().__init__() + self.avg = avg + + +class StartMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class StopMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class ChangeWindowSizeEvent(events.EventBase): + def __init__(self, value): + super().__init__() + self.window = value diff --git a/bachelor_controller/config_local.yaml b/bachelor_controller/config_local.yaml new file mode 100644 index 0000000..92999a8 --- /dev/null +++ b/bachelor_controller/config_local.yaml @@ -0,0 +1,29 @@ +## UniFlex Agent config file + +config: + name: 'Local_Controller' + info: 'CPs and modules on single node' + iface: 'lo' + +control_applications: + myController: + file : my_local_control_app.py + class_name : MyController + kwargs : {} + + myFilter: + file : my_filter.py + class_name : 
MyAvgFilter + kwargs : {"window": 5} + + +modules: + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + + simple: + module : uniflex_module_simple + class_name : SimpleModule2 + devices : ['phy0'] diff --git a/bachelor_controller/config_local_macosx.yaml b/bachelor_controller/config_local_macosx.yaml new file mode 100644 index 0000000..d2abb09 --- /dev/null +++ b/bachelor_controller/config_local_macosx.yaml @@ -0,0 +1,33 @@ +## UniFlex Agent config file + +config: + name: 'Local_Controller' + info: 'CPs and modules on single node' + iface: 'en0' + +control_applications: + myController: + file : my_local_control_app.py + class_name : MyController + kwargs : {} + + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + +modules: + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + + simple: + module : uniflex_module_simple + class_name : SimpleModule2 + devices : ['phy0'] + + iperf: + module : uniflex_module_iperf + class_name : IperfModule diff --git a/bachelor_controller/config_master.yaml b/bachelor_controller/config_master.yaml new file mode 100644 index 0000000..a4d507b --- /dev/null +++ b/bachelor_controller/config_master.yaml @@ -0,0 +1,28 @@ +## UniFlex Agent config file + +config: + name: "Global_Controller" + info: 'agent hosts global controller' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xpub: "tcp://127.0.0.1:8990" + xsub: "tcp://127.0.0.1:8989" + +control_applications: + myController: + file : my_control_app.py + class_name : MyController + kwargs : {} + +modules: + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoveryMasterModule + kwargs: {"iface":"lo", + "groupName":"uniflex_1234", + "sub":"tcp://127.0.0.1:8990", + "pub":"tcp://127.0.0.1:8989" + } diff --git 
a/bachelor_controller/config_slave.yaml b/bachelor_controller/config_slave.yaml new file mode 100644 index 0000000..4ce221e --- /dev/null +++ b/bachelor_controller/config_slave.yaml @@ -0,0 +1,25 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "MAC_List" : ["38:10:d5:d7:54:82", "38:10:d5:d7:59:23"]} + diff --git a/bachelor_controller/config_slave2.yaml b/bachelor_controller/config_slave2.yaml new file mode 100644 index 0000000..21c58dc --- /dev/null +++ b/bachelor_controller/config_slave2.yaml @@ -0,0 +1,25 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "MAC_List" : ["38:10:d5:9a:0b:60"]} + diff --git a/bachelor_controller/my_control_app.py b/bachelor_controller/my_control_app.py new file mode 100755 index 0000000..d45a6f6 --- /dev/null +++ b/bachelor_controller/my_control_app.py @@ -0,0 +1,263 @@ +import logging +import datetime +import random + +from sbi.radio_device.events import PacketLossEvent +from uniflex.core import modules +from uniflex.core import events +from uniflex.core.timer import TimerEventSender +from common import AveragedSpectrumScanSampleEvent +from common import 
ChangeWindowSizeEvent + +__author__ = "Piotr Gawlowicz, Sascha Rösler" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de, s.resler@campus.tu-berlin.de" + + +class PeriodicEvaluationTimeEvent(events.TimeEvent): + def __init__(self): + super().__init__() + + +class MyController(modules.ControlApplication): + def __init__(self): + super(MyController, self).__init__() + self.log = logging.getLogger('MyController') + self.running = False + + self.timeInterval = 10 + self.timer = TimerEventSender(self, PeriodicEvaluationTimeEvent) + self.timer.start(self.timeInterval) + + self.packetLossEventsEnabled = False + self.channel = 1 + + @modules.on_start() + def my_start_function(self): + print("start control app") + self.running = True + + @modules.on_exit() + def my_stop_function(self): + print("stop control app") + self.running = False + + @modules.on_event(events.NewNodeEvent) + def add_node(self, event): + node = event.node + + self.log.info("Added new node: {}, Local: {}" + .format(node.uuid, node.local)) + self._add_node(node) + + for dev in node.get_devices(): + print("Dev: ", dev.name) + print(dev) + + for m in node.get_modules(): + print("Module: ", m.name) + print(m) + + for app in node.get_control_applications(): + print("App: ", app.name) + print(app) + + device = node.get_device(0) + device.set_tx_power(15, "wlan0") + device.set_channel(random.randint(1, 11), "wlan0") + #device.packet_loss_monitor_start() + #device.spectral_scan_start() + # device.play_waveform() + # TODO: is_implemented() + + @modules.on_event(events.NodeExitEvent) + @modules.on_event(events.NodeLostEvent) + def remove_node(self, event): + self.log.info("Node lost".format()) + node = event.node + reason = event.reason + if self._remove_node(node): + self.log.info("Node: {}, Local: {} removed reason: {}" + .format(node.uuid, node.local, reason)) + + @modules.on_event(PacketLossEvent) + def 
serve_packet_loss_event(self, event): + node = event.node + device = event.device + self.log.info("Packet loss in node {}, dev: {}" + .format(node.hostname, device.name)) + + @modules.on_event(AveragedSpectrumScanSampleEvent) + def serve_spectral_scan_sample(self, event): + avgSample = event.avg + self.log.info("Averaged Spectral Scan Sample: {}" + .format(avgSample)) + + def default_cb(self, data): + node = data.node + devName = None + if data.device: + devName = data.device.name + msg = data.msg + print("Default Callback: " + "Node: {}, Dev: {}, Data: {}" + .format(node.hostname, devName, msg)) + + def get_power_cb(self, data): + node = data.node + msg = data.msg + dev = node.get_device(0) + print("Power in " + "Node: {}, Dev: {}, was set to: {}" + .format(node.hostname, dev.name, msg)) + + newPwr = random.randint(1, 20) + dev.blocking(False).set_tx_power(newPwr, "wlan0") + print("Power in " + "Node: {}, Dev: {}, was set to: {}" + .format(node.hostname, dev.name, newPwr)) + + def scheduled_get_channel_cb(self, data): + node = data.node + msg = data.msg + dev = node.get_device(0) + print("Scheduled get_channel; Power in " + "Node: {}, Dev: {}, was set to: {}" + .format(node.hostname, dev.name, msg)) + + @modules.on_event(PeriodicEvaluationTimeEvent) + def periodic_evaluation(self, event): + # go over collected samples, etc.... + # make some decisions, etc... 
+ print("Periodic Evaluation") + print("My nodes: ", [node.hostname for node in self.get_nodes()]) + self.timer.start(self.timeInterval) + + if len(self.get_nodes()) == 0: + return + + flows = [] + for node in self.get_nodes(): + for device in node.get_devices(): + device.spectral_scan_stop() + chnum = device.get_channel("wlan0") + chw = device.get_channel_width("wlan0") + infos = device.get_info_of_connected_devices("wlan0") + + for mac in infos: + flows.append({'mac address' : mac, 'channel number' : chnum, 'channel width' : chw}) + + for node in self.get_nodes(): + print ("work " + node.hostname) + for device in node.get_devices(): + + if type(device.my_control_flow) is not list: + device.my_control_flow = [] + + for flow in device.my_control_flow: + flow['old'] = True + + device.set_packet_counter(flows, "wlan0") + chnum = device.get_channel("wlan0") + chw = device.get_channel_width("wlan0") + infos = device.get_info_of_connected_devices("wlan0") + + bandwidth = {} + + for mac in infos: + values = infos[mac] + newTxBytes = int(values['tx bytes'][0]) + + flow = [d for d in device.my_control_flow if d['mac address'] == mac] + if len(flow) > 0: + flow = flow[0] + dif = datetime.datetime.now() - flow['last update'] + bandwidth[mac] = (newTxBytes - flow['tx bytes'] ) / (dif.total_seconds() + dif.microseconds / 1000000.0) + flow['tx bytes'] = newTxBytes + flow['last update'] = datetime.datetime.now() + flow['old'] = False + else : + device.my_control_flow.append({'mac address' : mac, 'tx bytes' : newTxBytes, 'last update' : datetime.datetime.now(), 'old' : False}) + + for flow in device.my_control_flow: + if flow['old']: + device.my_control_flow.remove(flow) + + print ("device " + device.name + " operates on channel " + str(chnum) + " with a bandwidth of " + chw + " - change to channel " + str(self.channel)) + print(bandwidth) + + device.blocking(False).set_channel(self.channel, "wlan0") + + self.channel += 1 + if self.channel > 13: + self.channel = 1 + ''' + node = 
self.get_node(0) + device = node.get_device(0) + + if device.is_packet_loss_monitor_running(): + device.packet_loss_monitor_stop() + device.spectral_scan_stop() + else: + device.packet_loss_monitor_start() + device.spectral_scan_start() + + avgFilterApp = None + for app in node.get_control_applications(): + if app.name == "MyAvgFilter": + avgFilterApp = app + break + + if avgFilterApp.is_running(): + myValue = random.randint(1, 20) + [nValue1, nValue2] = avgFilterApp.blocking(True).add_two(myValue) + print("My value: {} + 2 = {}".format(myValue, nValue1)) + print("My value: {} * 2 = {}".format(myValue, nValue2)) + avgFilterApp.stop() + + newWindow = random.randint(10, 50) + old = avgFilterApp.blocking(True).get_window_size() + print("Old Window Size : {}".format(old)) + avgFilterApp.blocking(True).change_window_size_func(newWindow) + nValue = avgFilterApp.blocking(True).get_window_size() + print("New Window Size : {}".format(nValue)) + + else: + avgFilterApp.start() + newWindow = random.randint(10, 50) + event = ChangeWindowSizeEvent(newWindow) + avgFilterApp.send_event(event) + + # execute non-blocking function immediately + device.blocking(False).set_tx_power(random.randint(1, 20), "wlan0") + + # execute non-blocking function immediately, with specific callback + device.callback(self.get_power_cb).get_tx_power("wlan0") + + # schedule non-blocking function delay + device.delay(3).callback(self.default_cb).get_tx_power("wlan0") + + # schedule non-blocking function exec time + exec_time = datetime.datetime.now() + datetime.timedelta(seconds=3) + newChannel = random.randint(1, 11) + device.exec_time(exec_time).set_channel(newChannel, "wlan0") + + # schedule execution of function multiple times + start_date = datetime.datetime.now() + datetime.timedelta(seconds=2) + interval = datetime.timedelta(seconds=1) + repetitionNum = 3 + device.exec_time(start_date, interval, repetitionNum).callback(self.scheduled_get_channel_cb).get_channel("wlan0") + + # execute blocking 
function immediately + result = device.get_channel("wlan0") + print("{} Channel is: {}".format(datetime.datetime.now(), result)) + + # exception handling, clean_per_flow_tx_power_table implementation + # raises exception + try: + device.clean_per_flow_tx_power_table("wlan0") + except Exception as e: + print("{} !!!Exception!!!: {}".format( + datetime.datetime.now(), e)) + ''' diff --git a/bachelor_controller/my_filter.py b/bachelor_controller/my_filter.py new file mode 100755 index 0000000..59d59a2 --- /dev/null +++ b/bachelor_controller/my_filter.py @@ -0,0 +1,53 @@ +import logging +from uniflex.core import modules +from sbi.radio_device.events import SpectralScanSampleEvent +from common import AveragedSpectrumScanSampleEvent +from common import ChangeWindowSizeEvent + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class MyAvgFilter(modules.ControlApplication): + def __init__(self, window): + super(MyAvgFilter, self).__init__() + self.log = logging.getLogger('MyFilter') + self.window = window + self.samples = [] + + @modules.on_event(ChangeWindowSizeEvent) + def change_window_size(self, event): + self.log.info("New window size: {}".format(event.window)) + self.window = event.window + + def change_window_size_func(self, newWindow): + self.log.info("New window size: {}".format(newWindow)) + self.window = newWindow + + def get_window_size(self): + return self.window + + @modules.on_event(SpectralScanSampleEvent) + def serve_spectral_scan_sample(self, event): + sample = event.sample + node = event.node + device = event.device + self.log.debug("New SpectralScan Sample:{} from node {}, device: {}" + .format(sample, node, device)) + + self.samples.append(sample) + + if len(self.samples) == self.window: + s = sum(self.samples) + self.samples.pop(0) + avg = s / self.window + self.log.debug("Calculated average: {}".format(avg)) + event = 
AveragedSpectrumScanSampleEvent(avg) + self.send_event(event) + + def add_two(self, value): + value1 = value + 2 + value2 = value * 2 + return [value1, value2] diff --git a/bachelor_controller/my_local_control_app.py b/bachelor_controller/my_local_control_app.py new file mode 100755 index 0000000..4b0db07 --- /dev/null +++ b/bachelor_controller/my_local_control_app.py @@ -0,0 +1,152 @@ +import logging +import datetime +import random +from sbi.radio_device.events import PacketLossEvent +from uniflex.core import modules +from uniflex.core import events +from uniflex.core.timer import TimerEventSender +from common import AveragedSpectrumScanSampleEvent +from common import StartMyFilterEvent +from common import StopMyFilterEvent + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class PeriodicEvaluationTimeEvent(events.TimeEvent): + def __init__(self): + super().__init__() + + +class MyController(modules.ControlApplication): + def __init__(self): + super(MyController, self).__init__() + self.log = logging.getLogger('MyController') + self.running = False + self.nodes = [] + + self.timeInterval = 10 + self.timer = TimerEventSender(self, PeriodicEvaluationTimeEvent) + self.timer.start(self.timeInterval) + + self.myFilterRunning = False + self.packetLossEventsEnabled = False + + @modules.on_start() + def my_start_function(self): + print("start control app") + self.running = True + + node = self.localNode + self.log.info("My local, Local: {}" + .format(node.local)) + + for dev in node.get_devices(): + print("Dev: ", dev.name) + print(dev) + + for m in node.get_modules(): + print("Module: ", m.name) + print(m) + + for app in node.get_control_applications(): + print("App: ", app.name) + print(app) + + device = node.get_device(0) + device.set_tx_power(15, 'ath0') + device.set_channel(random.randint(1, 11), 'ath0') + device.packet_loss_monitor_start() + 
self.packetLossEventsEnabled = True + device.spectral_scan_start() + + @modules.on_exit() + def my_stop_function(self): + print("stop control app") + self.running = False + + @modules.on_event(PacketLossEvent) + def serve_packet_loss_event(self, event): + node = event.node + device = event.device + self.log.info("Packet loss, dev: {}" + .format(device)) + + @modules.on_event(AveragedSpectrumScanSampleEvent) + def serve_spectral_scan_sample(self, event): + avgSample = event.avg + self.log.info("Averaged Spectral Scan Sample: {}" + .format(avgSample)) + + def default_cb(self, data): + node = data.node + devName = None + if data.device: + devName = data.device.name + msg = data.msg + print("Default Callback: " + "Dev: {}, Data: {}" + .format(devName, msg)) + + def get_power_cb(self, data): + node = data.node + dev = data.device + msg = data.msg + print("Power in " + "Dev: {}, was set to: {}" + .format(dev.name, msg)) + + @modules.on_event(PeriodicEvaluationTimeEvent) + def periodic_evaluation(self, event): + # go over collected samples, etc.... + # make some decisions, etc... 
+ print("Periodic Evaluation") + + node = self.localNode + device = node.get_device(0) + + self.log.info("My local node, Local: {}" + .format(node.local)) + self.timer.start(self.timeInterval) + + if self.packetLossEventsEnabled: + device.packet_loss_monitor_stop() + self.packetLossEventsEnabled = False + else: + device.packet_loss_monitor_start() + self.packetLossEventsEnabled = True + + if self.myFilterRunning: + self.send_event(StopMyFilterEvent()) + self.myFilterRunning = False + else: + self.send_event(StartMyFilterEvent()) + self.myFilterRunning = True + + # execute non-blocking function immediately + device.blocking(False).set_tx_power(random.randint(1, 20), 'ath0') + + # execute non-blocking function immediately, with specific callback + device.callback(self.get_power_cb).get_tx_power('ath0') + + # schedule non-blocking function delay + device.delay(3).callback(self.default_cb).get_tx_power("wlan0") + + # schedule non-blocking function exec time + exec_time = datetime.datetime.now() + datetime.timedelta(seconds=3) + newChannel = random.randint(1, 11) + device.exec_time(exec_time).set_channel(newChannel, 'ath0') + + # execute blocking function immediately + result = device.get_channel('ath0') + print("{} Channel is: {}".format(datetime.datetime.now(), result)) + + # exception handling, clean_per_flow_tx_power_table implementation + # raises exception + try: + device.clean_per_flow_tx_power_table("wlan0") + except Exception as e: + print("{} !!!Exception!!!: {}".format( + datetime.datetime.now(), e)) diff --git a/bachelor_controller/readme.txt b/bachelor_controller/readme.txt new file mode 100644 index 0000000..cbbfb5a --- /dev/null +++ b/bachelor_controller/readme.txt @@ -0,0 +1,11 @@ +# 1. Run control program and all modules on local node +uniflex-agent --config ./config_local.yaml + +# 2a. Run control program in master node: +uniflex-broker +# 2b. Run control program in master node: +uniflex-agent --config ./config_master.yaml +# 2c. 
Run modules in slave node: +uniflex-agent --config ./config_slave.yaml + +# For debugging mode run with -v option From a82c642218fc2bfae6d66917d392b735663e9bd6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Sun, 11 Nov 2018 20:17:00 +0100 Subject: [PATCH 02/54] add openAI controller in uniflex --- openAI_RRM/LICENSE | 9 + openAI_RRM/common.py | 28 ++ openAI_RRM/config_master.yaml | 30 ++ openAI_RRM/config_slave.yaml | 25 ++ openAI_RRM/config_slave2.yaml | 25 ++ openAI_RRM/my_control_app.py | 464 ++++++++++++++++++++++++++++++ openAI_RRM/my_filter.py | 53 ++++ openAI_RRM/openAI_gym/__init__.py | 0 openAI_RRM/openAI_gym/test.py | 77 +++++ openAI_RRM/readme.txt | 11 + 10 files changed, 722 insertions(+) create mode 100644 openAI_RRM/LICENSE create mode 100755 openAI_RRM/common.py create mode 100644 openAI_RRM/config_master.yaml create mode 100644 openAI_RRM/config_slave.yaml create mode 100644 openAI_RRM/config_slave2.yaml create mode 100755 openAI_RRM/my_control_app.py create mode 100755 openAI_RRM/my_filter.py create mode 100644 openAI_RRM/openAI_gym/__init__.py create mode 100644 openAI_RRM/openAI_gym/test.py create mode 100644 openAI_RRM/readme.txt diff --git a/openAI_RRM/LICENSE b/openAI_RRM/LICENSE new file mode 100644 index 0000000..8ac59fd --- /dev/null +++ b/openAI_RRM/LICENSE @@ -0,0 +1,9 @@ +The MIT License (MIT) + +Copyright (c) 2016 Technische Universität Berlin + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/openAI_RRM/common.py b/openAI_RRM/common.py new file mode 100755 index 0000000..370d25f --- /dev/null +++ b/openAI_RRM/common.py @@ -0,0 +1,28 @@ +from uniflex.core import events + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class AveragedSpectrumScanSampleEvent(events.EventBase): + def __init__(self, avg): + super().__init__() + self.avg = avg + + +class StartMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class StopMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class ChangeWindowSizeEvent(events.EventBase): + def __init__(self, value): + super().__init__() + self.window = value diff --git a/openAI_RRM/config_master.yaml b/openAI_RRM/config_master.yaml new file mode 100644 index 0000000..d7b972d --- /dev/null +++ b/openAI_RRM/config_master.yaml @@ -0,0 +1,30 @@ +## UniFlex Agent config file + +config: + name: "Global_Controller" + info: 'agent hosts global controller' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xpub: "tcp://127.0.0.1:8990" + xsub: "tcp://127.0.0.1:8989" + +control_applications: + myController: + file : my_control_app.py + class_name : OpenAIRRM + kwargs : { + 'openAI_controller': "openAI_gym.test" + } + +modules: + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoveryMasterModule + kwargs: {"iface":"lo", + 
"groupName":"uniflex_1234", + "sub":"tcp://127.0.0.1:8990", + "pub":"tcp://127.0.0.1:8989" + } diff --git a/openAI_RRM/config_slave.yaml b/openAI_RRM/config_slave.yaml new file mode 100644 index 0000000..4ce221e --- /dev/null +++ b/openAI_RRM/config_slave.yaml @@ -0,0 +1,25 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "MAC_List" : ["38:10:d5:d7:54:82", "38:10:d5:d7:59:23"]} + diff --git a/openAI_RRM/config_slave2.yaml b/openAI_RRM/config_slave2.yaml new file mode 100644 index 0000000..21c58dc --- /dev/null +++ b/openAI_RRM/config_slave2.yaml @@ -0,0 +1,25 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "MAC_List" : ["38:10:d5:9a:0b:60"]} + diff --git a/openAI_RRM/my_control_app.py b/openAI_RRM/my_control_app.py new file mode 100755 index 0000000..d84839f --- /dev/null +++ b/openAI_RRM/my_control_app.py @@ -0,0 +1,464 @@ +import logging +import datetime +import random + +from sbi.radio_device.events import PacketLossEvent +from uniflex.core import modules +from uniflex.core import events +from uniflex.core.timer import TimerEventSender +from common import AveragedSpectrumScanSampleEvent +from common 
import ChangeWindowSizeEvent + +__author__ = "Piotr Gawlowicz, Sascha Rösler" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de, s.resler@campus.tu-berlin.de" + + +class PeriodicEvaluationTimeEvent(events.TimeEvent): + def __init__(self): + super().__init__() + + +class OpenAIRRM(modules.ControlApplication): + def __init__(self,**kwargs): + super(OpenAIRRM, self).__init__() + self.log = logging.getLogger('OpenAI_RRM') + self.running = False + + self.timeInterval = 10 + self.timer = TimerEventSender(self, PeriodicEvaluationTimeEvent) + self.timer.start(self.timeInterval) + + self.packetLossEventsEnabled = False + self.channel = 1 + + if not "openAI_controller" in kwargs: + raise ValueError("There is no OpenAI gym controller specified. Can not find \"" + "openAI_controller" + "\" as kwargs in the config file.") + else: + __import__(kwargs["openAI_controller"], globals(), locals(), [], 0) + splits = kwargs["openAI_controller"].split('.') + class_name = splits[-1] + self.openAI_controller = class_name(self, kwargs) + + @modules.on_start() + def my_start_function(self): + print("start control app") + self.running = True + + @modules.on_exit() + def my_stop_function(self): + print("stop control app") + self.running = False + + @modules.on_event(events.NewNodeEvent) + def add_node(self, event): + node = event.node + + self.log.info("Added new node: {}, Local: {}" + .format(node.uuid, node.local)) + self._add_node(node) + + for dev in node.get_devices(): + print("Dev: ", dev.name) + print(dev) + + for m in node.get_modules(): + print("Module: ", m.name) + print(m) + + for app in node.get_control_applications(): + print("App: ", app.name) + print(app) + + #device = node.get_device(0) + #device.set_tx_power(15, "wlan0") + #device.set_channel(random.randint(1, 11), "wlan0") + #device.packet_loss_monitor_start() + #device.spectral_scan_start() + # device.play_waveform() + # TODO: 
is_implemented() + + @modules.on_event(events.NodeExitEvent) + @modules.on_event(events.NodeLostEvent) + def remove_node(self, event): + self.log.info("Node lost".format()) + node = event.node + reason = event.reason + if self._remove_node(node): + self.log.info("Node: {}, Local: {} removed reason: {}" + .format(node.uuid, node.local, reason)) + + @modules.on_event(PacketLossEvent) + def serve_packet_loss_event(self, event): + node = event.node + device = event.device + self.log.info("Packet loss in node {}, dev: {}" + .format(node.hostname, device.name)) + + @modules.on_event(AveragedSpectrumScanSampleEvent) + def serve_spectral_scan_sample(self, event): + avgSample = event.avg + self.log.info("Averaged Spectral Scan Sample: {}" + .format(avgSample)) + + def default_cb(self, data): + node = data.node + devName = None + if data.device: + devName = data.device.name + msg = data.msg + print("Default Callback: " + "Node: {}, Dev: {}, Data: {}" + .format(node.hostname, devName, msg)) + + def get_power_cb(self, data): + node = data.node + msg = data.msg + dev = node.get_device(0) + print("Power in " + "Node: {}, Dev: {}, was set to: {}" + .format(node.hostname, dev.name, msg)) + + newPwr = random.randint(1, 20) + dev.blocking(False).set_tx_power(newPwr, "wlan0") + print("Power in " + "Node: {}, Dev: {}, was set to: {}" + .format(node.hostname, dev.name, newPwr)) + + def _get_device_by_uuids(self, node_uuid, dev_uuid): + nodes = self.get_nodes() + myNodes = [x for x in nodes if x.uuid == node_uuid] + if(len(myNodes) is not 1): + return None + node = myNodes[0] + devices = node.get_devices() + myDevices = [x for x in devices if x.uuid == dev_uuid] + if(len(myDevices) is not 1): + return None + return myDevices[0] + + def scheduled_get_channel_cb(self, data): + node = data.node + msg = data.msg + dev = node.get_device(0) + print("Scheduled get_channel; Power in " + "Node: {}, Dev: {}, was set to: {}" + .format(node.hostname, dev.name, msg)) + + def set_channel(self, 
node_uuid, dev_uuid, ifaceName, channel_number, channel_width): + device = self._get_device_by_uuids(node_uuid, dev_uuid) + if device is None: + return False + if channel_width is not None: + device.blocking(False).set_channel(channel_number, "wlan0", channel_width= channel_width) + else: + device.blocking(False).set_channel(channel_number, "wlan0") + return True + + ''' + Returns a list of the bandwidth of all transmitted data from one + controlled device to a client. The data is structured as follows: + { + 'MAC_of_client1' : { + 'mac' : 'MAC_of_client1', + 'bandwidth': bandwidth to the client, + 'node': { + 'hostname': 'hostname of my AP node', + 'uuid': 'uuid of my AP node' + }, + 'device': { + 'name': 'device name of the AP's physical interface', + 'uuid': 'uuid of the device', + }, + 'interface': 'name of the interface' + } + } + Notice: new devices have bandwidth 0! + ''' + def get_bandwidth(self): + bandwidth = {} + for node in self.get_nodes(): + for device in node.get_devices(): + if type(device.my_control_flow) is not list: + device.my_control_flow = [] + + for flow in device.my_control_flow: + flow['old'] = True + + for interface in device.get_interfaces(): + infos = device.get_info_of_connected_devices(interface) + + for mac in infos: + values = infos[mac] + newTxBytes = int(values['tx bytes'][0]) + + flow = [d for d in device.my_control_flow if d['mac address'] == mac] + if len(flow) > 0: + flow = flow[0] + dif = datetime.datetime.now() - flow['last update'] + bandwidth[mac] = { + 'bandwidth':(newTxBytes - flow['tx bytes'] ) / (dif.total_seconds() + dif.microseconds / 1000000.0), + 'node': {'hostname': node.hostname, 'uuid': node.uuid}, + 'device': {'name': device.name, 'uuid': device.uuid}, + 'interface': interface} + flow['tx bytes'] = newTxBytes + flow['last update'] = datetime.datetime.now() + flow['old'] = False + else : + device.my_control_flow.append({'mac address' : mac, 'tx bytes' : newTxBytes, 'last update' : datetime.datetime.now(), 'old' : 
False}) + bandwidth[mac] = { + 'mac' : mac, + 'bandwidth': 0, + 'node': {'hostname': node.hostname, 'uuid': node.uuid}, + 'device': {'name': device.name, 'uuid': device.uuid}, + 'interface': interface} + + for flow in device.my_control_flow: + if flow['old']: + device.my_control_flow.remove(flow) + return bandwidth + + ''' + Returns a data structure of all available interfaces in the system + It is structured as follows: + { + 'uuid_of_node_1': { + 'hostname' : 'hostname of node1', + 'uuid' : 'uuid of node1', + 'devices' : { + 'name' : 'name of device1', + 'uuid' : 'uuid of device1', + 'interfaces' : [ + 'name of iface1', 'name of iface2' + ] + }, + ... + }, + ... + } + ''' + def get_interfaces(self): + interfaces = {} + for node in self.get_nodes(): + nodeinfo = {'hostname': node.hostname, 'uuid': node.uuid} + devices = {} + for device in node.get_devices(): + devinfo = {'name': device.name, 'uuid': device.uuid} + interfaces_tmp = [] + for interface in device.get_interfaces(): + interfaces_tmp.append(interface) + devinfo['interfaces'] = interfaces_tmp + devices[device.uuid] = devinfo + nodeinfo['devices'] = devices + interfaces[node.uuid] = nodeinfo + return interfaces + + ''' + Collects and returns a list of the channel to interface mapping + [ + {'channel number' : 'number of the channel', + 'channel width' : 'width of the channel', + 'node': { + 'hostname': 'hostname of my AP node', + 'uuid': 'uuid of my AP node' + }, + 'device': { + 'name': 'device name of the AP's physical interface', + 'uuid': 'uuid of the device', + }, + 'interface': 'name of the interface' + ] + ''' + def get_channels(self): + channel_mapping = [] + for node in self.get_nodes(): + for device in node.get_devices(): + for interface in device.get_interfaces(): + chnum = device.get_channel(interface) + chw = device.get_channel_width(interface) + + channel_mapping.append({ + 'channel number' : chnum, + 'channel width' : chw, + 'device' : {'name': device.name, 'uuid': device.uuid}, + 'node' : 
{'hostname': node.hostname, 'uuid': node.uuid}, + 'interface' : interface}) + return channel_mapping + + def simulate_flows(self): + flows = [] + for node in self.get_nodes(): + for device in node.get_devices(): + for interface in device.get_interfaces(): + chnum = device.get_channel(interface) + chw = device.get_channel_width(interface) + infos = device.get_info_of_connected_devices(interface) + + for mac in infos: + flows.append({'mac address' : mac, 'channel number' : chnum, 'channel width' : chw, 'iface': interface}) + + for node in self.get_nodes(): + for device in node.get_devices(): + for interface in device.get_interfaces(): + device.set_packet_counter(flows, interface) + + @modules.on_event(PeriodicEvaluationTimeEvent) + def periodic_evaluation(self, event): + # go over collected samples, etc.... + # make some decisions, etc... + print("Periodic Evaluation") + print("My nodes: ", [node.hostname for node in self.get_nodes()]) + self.timer.start(self.timeInterval) + + if len(self.get_nodes()) == 0: + return + + flows = [] + + ifaces = self.get_interfaces() + node_uuid = list(ifaces.keys())[0] + dev_uuid = list(ifaces[node_uuid]['devices'].keys())[0] + ifaceName = ifaces[node_uuid]['devices'][dev_uuid]['interfaces'][0] + + print(self.get_channels()) + self.simulate_flows() + print(self.get_bandwidth()) + + for node in self.get_nodes(): + for device in node.get_devices(): + for interface in device.get_interfaces(): + self.set_channel(node.uuid, device.uuid, interface, self.channel, None) + self.channel += 1 + if self.channel > 13: + self.channel = 1 + + ''' + print(self.get_bandwidth()) + + print(self.get_nodes()) + for node in self.get_nodes(): + print(node.get_devices()) + for device in node.get_devices(): + device.spectral_scan_stop() + chnum = device.get_channel("wlan0") + chw = device.get_channel_width("wlan0") + infos = device.get_info_of_connected_devices("wlan0") + + for mac in infos: + flows.append({'mac address' : mac, 'channel number' : chnum, 
'channel width' : chw}) + + for node in self.get_nodes(): + print ("work " + node.hostname) + for device in node.get_devices(): + + if type(device.my_control_flow) is not list: + device.my_control_flow = [] + + for flow in device.my_control_flow: + flow['old'] = True + + device.set_packet_counter(flows, "wlan0") + chnum = device.get_channel("wlan0") + chw = device.get_channel_width("wlan0") + infos = device.get_info_of_connected_devices("wlan0") + + bandwidth = {} + + for mac in infos: + values = infos[mac] + newTxBytes = int(values['tx bytes'][0]) + + flow = [d for d in device.my_control_flow if d['mac address'] == mac] + if len(flow) > 0: + flow = flow[0] + dif = datetime.datetime.now() - flow['last update'] + bandwidth[mac] = (newTxBytes - flow['tx bytes'] ) / (dif.total_seconds() + dif.microseconds / 1000000.0) + flow['tx bytes'] = newTxBytes + flow['last update'] = datetime.datetime.now() + flow['old'] = False + else : + device.my_control_flow.append({'mac address' : mac, 'tx bytes' : newTxBytes, 'last update' : datetime.datetime.now(), 'old' : False}) + + for flow in device.my_control_flow: + if flow['old']: + device.my_control_flow.remove(flow) + + print ("device " + device.name + " operates on channel " + str(chnum) + " with a bandwidth of " + chw + " - change to channel " + str(self.channel)) + print(bandwidth) + + device.blocking(False).set_channel(self.channel, "wlan0") + + self.channel += 1 + if self.channel > 13: + self.channel = 1 + ''' + ''' + node = self.get_node(0) + device = node.get_device(0) + + if device.is_packet_loss_monitor_running(): + device.packet_loss_monitor_stop() + device.spectral_scan_stop() + else: + device.packet_loss_monitor_start() + device.spectral_scan_start() + + avgFilterApp = None + for app in node.get_control_applications(): + if app.name == "MyAvgFilter": + avgFilterApp = app + break + + if avgFilterApp.is_running(): + myValue = random.randint(1, 20) + [nValue1, nValue2] = avgFilterApp.blocking(True).add_two(myValue) + 
print("My value: {} + 2 = {}".format(myValue, nValue1)) + print("My value: {} * 2 = {}".format(myValue, nValue2)) + avgFilterApp.stop() + + newWindow = random.randint(10, 50) + old = avgFilterApp.blocking(True).get_window_size() + print("Old Window Size : {}".format(old)) + avgFilterApp.blocking(True).change_window_size_func(newWindow) + nValue = avgFilterApp.blocking(True).get_window_size() + print("New Window Size : {}".format(nValue)) + + else: + avgFilterApp.start() + newWindow = random.randint(10, 50) + event = ChangeWindowSizeEvent(newWindow) + avgFilterApp.send_event(event) + + # execute non-blocking function immediately + device.blocking(False).set_tx_power(random.randint(1, 20), "wlan0") + + # execute non-blocking function immediately, with specific callback + device.callback(self.get_power_cb).get_tx_power("wlan0") + + # schedule non-blocking function delay + device.delay(3).callback(self.default_cb).get_tx_power("wlan0") + + # schedule non-blocking function exec time + exec_time = datetime.datetime.now() + datetime.timedelta(seconds=3) + newChannel = random.randint(1, 11) + device.exec_time(exec_time).set_channel(newChannel, "wlan0") + + # schedule execution of function multiple times + start_date = datetime.datetime.now() + datetime.timedelta(seconds=2) + interval = datetime.timedelta(seconds=1) + repetitionNum = 3 + device.exec_time(start_date, interval, repetitionNum).callback(self.scheduled_get_channel_cb).get_channel("wlan0") + + # execute blocking function immediately + result = device.get_channel("wlan0") + print("{} Channel is: {}".format(datetime.datetime.now(), result)) + + # exception handling, clean_per_flow_tx_power_table implementation + # raises exception + try: + device.clean_per_flow_tx_power_table("wlan0") + except Exception as e: + print("{} !!!Exception!!!: {}".format( + datetime.datetime.now(), e)) + ''' diff --git a/openAI_RRM/my_filter.py b/openAI_RRM/my_filter.py new file mode 100755 index 0000000..59d59a2 --- /dev/null +++ 
b/openAI_RRM/my_filter.py @@ -0,0 +1,53 @@ +import logging +from uniflex.core import modules +from sbi.radio_device.events import SpectralScanSampleEvent +from common import AveragedSpectrumScanSampleEvent +from common import ChangeWindowSizeEvent + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class MyAvgFilter(modules.ControlApplication): + def __init__(self, window): + super(MyAvgFilter, self).__init__() + self.log = logging.getLogger('MyFilter') + self.window = window + self.samples = [] + + @modules.on_event(ChangeWindowSizeEvent) + def change_window_size(self, event): + self.log.info("New window size: {}".format(event.window)) + self.window = event.window + + def change_window_size_func(self, newWindow): + self.log.info("New window size: {}".format(newWindow)) + self.window = newWindow + + def get_window_size(self): + return self.window + + @modules.on_event(SpectralScanSampleEvent) + def serve_spectral_scan_sample(self, event): + sample = event.sample + node = event.node + device = event.device + self.log.debug("New SpectralScan Sample:{} from node {}, device: {}" + .format(sample, node, device)) + + self.samples.append(sample) + + if len(self.samples) == self.window: + s = sum(self.samples) + self.samples.pop(0) + avg = s / self.window + self.log.debug("Calculated average: {}".format(avg)) + event = AveragedSpectrumScanSampleEvent(avg) + self.send_event(event) + + def add_two(self, value): + value1 = value + 2 + value2 = value * 2 + return [value1, value2] diff --git a/openAI_RRM/openAI_gym/__init__.py b/openAI_RRM/openAI_gym/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/openAI_RRM/openAI_gym/test.py b/openAI_RRM/openAI_gym/test.py new file mode 100644 index 0000000..ecb8fe3 --- /dev/null +++ b/openAI_RRM/openAI_gym/test.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import gym 
+#import tensorflow as tf +#import tensorflow.contrib.slim as slim +import numpy as np +from tensorflow import keras +from ns3gym import ns3env + +class test(object): + def __init__(self, unifelxController, **kwargs): + env = gym.make('ns3-v0') + ob_space = env.observation_space + ac_space = env.action_space + print("Observation space: ", ob_space, ob_space.dtype) + print("Action space: ", ac_space, ac_space.n) + + def run(self): + s_size = ob_space.shape[0] + a_size = ac_space.n + model = keras.Sequential() + model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) + model.add(keras.layers.Dense(a_size, activation='softmax')) + model.compile(optimizer=tf.train.AdamOptimizer(0.001), + loss='categorical_crossentropy', + metrics=['accuracy']) + + total_episodes = 200 + max_env_steps = 100 + env._max_episode_steps = max_env_steps + + epsilon = 1.0 # exploration rate + epsilon_min = 0.01 + epsilon_decay = 0.999 + + time_history = [] + rew_history = [] + + for e in range(total_episodes): + + state = env.reset() + state = np.reshape(state, [1, s_size]) + rewardsum = 0 + for time in range(max_env_steps): + + # Choose action + if np.random.rand(1) < epsilon: + action = np.random.randint(a_size) + else: + action = np.argmax(model.predict(state)[0]) + + # Step + next_state, reward, done, _ = env.step(action) + + if done: + print("episode: {}/{}, time: {}, rew: {}, eps: {:.2}" + .format(e, total_episodes, time, rewardsum, epsilon)) + break + + next_state = np.reshape(next_state, [1, s_size]) + + # Train + target = reward + if not done: + target = (reward + 0.95 * np.amax(model.predict(next_state)[0])) + + target_f = model.predict(state) + target_f[0][action] = target + model.fit(state, target_f, epochs=1, verbose=0) + + state = next_state + rewardsum += reward + if epsilon > epsilon_min: epsilon *= epsilon_decay + + time_history.append(time) + rew_history.append(rewardsum) diff --git a/openAI_RRM/readme.txt b/openAI_RRM/readme.txt new file mode 100644 
index 0000000..cbbfb5a --- /dev/null +++ b/openAI_RRM/readme.txt @@ -0,0 +1,11 @@ +# 1. Run control program and all modules on local node +uniflex-agent --config ./config_local.yaml + +# 2a. Run control program in master node: +uniflex-broker +# 2b. Run control program in master node: +uniflex-agent --config ./config_master.yaml +# 2c. Run modules in slave node: +uniflex-agent --config ./config_slave.yaml + +# For debugging mode run with -v option From 8788613f568c1b16290f5fd238d26b558e9bd738 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Tue, 13 Nov 2018 15:56:27 +0100 Subject: [PATCH 03/54] rename agent --- openAI_RRM/config_master.yaml | 2 +- openAI_RRM/my_control_app.py | 1 + openAI_RRM/openAI_gym/{test.py => rrm_agent.py} | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) rename openAI_RRM/openAI_gym/{test.py => rrm_agent.py} (99%) diff --git a/openAI_RRM/config_master.yaml b/openAI_RRM/config_master.yaml index d7b972d..8ca9d48 100644 --- a/openAI_RRM/config_master.yaml +++ b/openAI_RRM/config_master.yaml @@ -16,7 +16,7 @@ control_applications: file : my_control_app.py class_name : OpenAIRRM kwargs : { - 'openAI_controller': "openAI_gym.test" + 'openAI_controller': "openAI_gym.rrm_agent" } modules: diff --git a/openAI_RRM/my_control_app.py b/openAI_RRM/my_control_app.py index d84839f..d66cba4 100755 --- a/openAI_RRM/my_control_app.py +++ b/openAI_RRM/my_control_app.py @@ -45,6 +45,7 @@ def __init__(self,**kwargs): def my_start_function(self): print("start control app") self.running = True + self.openAI_controller.run() @modules.on_exit() def my_stop_function(self): diff --git a/openAI_RRM/openAI_gym/test.py b/openAI_RRM/openAI_gym/rrm_agent.py similarity index 99% rename from openAI_RRM/openAI_gym/test.py rename to openAI_RRM/openAI_gym/rrm_agent.py index ecb8fe3..89c8d09 100644 --- a/openAI_RRM/openAI_gym/test.py +++ b/openAI_RRM/openAI_gym/rrm_agent.py @@ -8,7 +8,7 @@ from tensorflow import keras from ns3gym import ns3env -class 
test(object): +class rrm_agent(object): def __init__(self, unifelxController, **kwargs): env = gym.make('ns3-v0') ob_space = env.observation_space From b5bb2d27a4f2fb3a9234f9549bb0cfb867f76da9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Mon, 26 Nov 2018 22:13:12 +0100 Subject: [PATCH 04/54] abstract controller --- ...y_control_app.py => channel_controller.py} | 2 +- openAI_RRM/controller.py | 39 +++++++++ openAI_RRM/openAI_gym/__init__.py | 0 openAI_RRM/openAI_gym/rrm_agent.py | 77 ------------------ openAI_RRM/rrm_agent.py | 80 +++++++++++++++++++ 5 files changed, 120 insertions(+), 78 deletions(-) rename openAI_RRM/{my_control_app.py => channel_controller.py} (99%) create mode 100644 openAI_RRM/controller.py delete mode 100644 openAI_RRM/openAI_gym/__init__.py delete mode 100644 openAI_RRM/openAI_gym/rrm_agent.py create mode 100644 openAI_RRM/rrm_agent.py diff --git a/openAI_RRM/my_control_app.py b/openAI_RRM/channel_controller.py similarity index 99% rename from openAI_RRM/my_control_app.py rename to openAI_RRM/channel_controller.py index d66cba4..053ad37 100755 --- a/openAI_RRM/my_control_app.py +++ b/openAI_RRM/channel_controller.py @@ -20,7 +20,7 @@ def __init__(self): super().__init__() -class OpenAIRRM(modules.ControlApplication): +class UniflexChannelController(modules.ControlApplication): def __init__(self,**kwargs): super(OpenAIRRM, self).__init__() self.log = logging.getLogger('OpenAI_RRM') diff --git a/openAI_RRM/controller.py b/openAI_RRM/controller.py new file mode 100644 index 0000000..c8fc4d0 --- /dev/null +++ b/openAI_RRM/controller.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python3 + +from UniFlexGym.interfaces.uniflex_controller import UniFlexController +#import os,sys,inspect +#current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +#parent_dir = os.path.dirname(current_dir) +#sys.path.insert(0, parent_dir) +from channel_controller import UniflexChannelController + +import gym + +class 
Controller(UniFlexController): + def __init__(self, **kwargs): + super() + return + + def reset(self): + return + + def execute_action(self, action): + return + + def render(): + return + + def get_observationSpace(self): + return + + def get_actionSpace(self): + return + + def get_observation(self): + return + + def get_gameOver(self): + return + + def get_reward(self): + return diff --git a/openAI_RRM/openAI_gym/__init__.py b/openAI_RRM/openAI_gym/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/openAI_RRM/openAI_gym/rrm_agent.py b/openAI_RRM/openAI_gym/rrm_agent.py deleted file mode 100644 index 89c8d09..0000000 --- a/openAI_RRM/openAI_gym/rrm_agent.py +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import gym -#import tensorflow as tf -#import tensorflow.contrib.slim as slim -import numpy as np -from tensorflow import keras -from ns3gym import ns3env - -class rrm_agent(object): - def __init__(self, unifelxController, **kwargs): - env = gym.make('ns3-v0') - ob_space = env.observation_space - ac_space = env.action_space - print("Observation space: ", ob_space, ob_space.dtype) - print("Action space: ", ac_space, ac_space.n) - - def run(self): - s_size = ob_space.shape[0] - a_size = ac_space.n - model = keras.Sequential() - model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) - model.add(keras.layers.Dense(a_size, activation='softmax')) - model.compile(optimizer=tf.train.AdamOptimizer(0.001), - loss='categorical_crossentropy', - metrics=['accuracy']) - - total_episodes = 200 - max_env_steps = 100 - env._max_episode_steps = max_env_steps - - epsilon = 1.0 # exploration rate - epsilon_min = 0.01 - epsilon_decay = 0.999 - - time_history = [] - rew_history = [] - - for e in range(total_episodes): - - state = env.reset() - state = np.reshape(state, [1, s_size]) - rewardsum = 0 - for time in range(max_env_steps): - - # Choose action - if np.random.rand(1) < epsilon: - action = 
np.random.randint(a_size) - else: - action = np.argmax(model.predict(state)[0]) - - # Step - next_state, reward, done, _ = env.step(action) - - if done: - print("episode: {}/{}, time: {}, rew: {}, eps: {:.2}" - .format(e, total_episodes, time, rewardsum, epsilon)) - break - - next_state = np.reshape(next_state, [1, s_size]) - - # Train - target = reward - if not done: - target = (reward + 0.95 * np.amax(model.predict(next_state)[0])) - - target_f = model.predict(state) - target_f[0][action] = target - model.fit(state, target_f, epochs=1, verbose=0) - - state = next_state - rewardsum += reward - if epsilon > epsilon_min: epsilon *= epsilon_decay - - time_history.append(time) - rew_history.append(rewardsum) diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py new file mode 100644 index 0000000..c5ce951 --- /dev/null +++ b/openAI_RRM/rrm_agent.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import gym +import UniFlexGym +#import tensorflow as tf +#import tensorflow.contrib.slim as slim +import numpy as np +#from tensorflow import keras + + +#create uniflex environment, steptime is 10sec +env = gym.make('uniflex-v0') +env.configure(steptime=10) +env.start_controller() + +''' +ob_space = env.observation_space +ac_space = env.action_space +print("Observation space: ", ob_space, ob_space.dtype) +print("Action space: ", ac_space, ac_space.n) + +s_size = ob_space.shape[0] +a_size = ac_space.n +model = keras.Sequential() +model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) +model.add(keras.layers.Dense(a_size, activation='softmax')) +model.compile(optimizer=tf.train.AdamOptimizer(0.001), + loss='categorical_crossentropy', + metrics=['accuracy']) + +total_episodes = 200 +max_env_steps = 100 +env._max_episode_steps = max_env_steps + +epsilon = 1.0 # exploration rate +epsilon_min = 0.01 +epsilon_decay = 0.999 + +time_history = [] +rew_history = [] + +for e in range(total_episodes): + + state = env.reset() + state = 
np.reshape(state, [1, s_size]) + rewardsum = 0 + for time in range(max_env_steps): + # Choose action + if np.random.rand(1) < epsilon: + action = np.random.randint(a_size) + else: + action = np.argmax(model.predict(state)[0]) + + # Step + next_state, reward, done, _ = env.step(action) + + if done: + print("episode: {}/{}, time: {}, rew: {}, eps: {:.2}" + .format(e, total_episodes, time, rewardsum, epsilon)) + break + + next_state = np.reshape(next_state, [1, s_size]) + + # Train + target = reward + if not done: + target = (reward + 0.95 * np.amax(model.predict(next_state)[0])) + + target_f = model.predict(state) + target_f[0][action] = target + model.fit(state, target_f, epochs=1, verbose=0) + + state = next_state + rewardsum += reward + if epsilon > epsilon_min: epsilon *= epsilon_decay + + time_history.append(time) + rew_history.append(rewardsum) +''' From 3f2db931f8ea9b90bf5d12b5b1d4b4a40181f772 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Tue, 27 Nov 2018 17:03:39 +0100 Subject: [PATCH 05/54] channel management controller --- openAI_RRM/channel_controller.py | 22 +++++----- openAI_RRM/controller.py | 70 ++++++++++++++++++++++++++++++-- openAI_RRM/rrm_agent.py | 7 ++++ 3 files changed, 85 insertions(+), 14 deletions(-) diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index 053ad37..90dbc2a 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -22,8 +22,8 @@ def __init__(self): class UniflexChannelController(modules.ControlApplication): def __init__(self,**kwargs): - super(OpenAIRRM, self).__init__() - self.log = logging.getLogger('OpenAI_RRM') + super(UniflexChannelController, self).__init__() + self.log = logging.getLogger('ChannelController') self.running = False self.timeInterval = 10 @@ -33,13 +33,13 @@ def __init__(self,**kwargs): self.packetLossEventsEnabled = False self.channel = 1 - if not "openAI_controller" in kwargs: - raise ValueError("There is no OpenAI gym 
controller specified. Can not find \"" + "openAI_controller" + "\" as kwargs in the config file.") - else: - __import__(kwargs["openAI_controller"], globals(), locals(), [], 0) - splits = kwargs["openAI_controller"].split('.') - class_name = splits[-1] - self.openAI_controller = class_name(self, kwargs) +# if not "openAI_controller" in kwargs: +# raise ValueError("There is no OpenAI gym controller specified. Can not #find \"" + "openAI_controller" + "\" as kwargs in the config file.") +# else: +# __import__(kwargs["openAI_controller"], globals(), locals(), [], 0) +# splits = kwargs["openAI_controller"].split('.') +# class_name = splits[-1] +# self.openAI_controller = class_name(self, kwargs) @modules.on_start() def my_start_function(self): @@ -152,9 +152,9 @@ def set_channel(self, node_uuid, dev_uuid, ifaceName, channel_number, channel_wi if device is None: return False if channel_width is not None: - device.blocking(False).set_channel(channel_number, "wlan0", channel_width= channel_width) + device.blocking(False).set_channel(channel_number, ifaceName, channel_width= channel_width) else: - device.blocking(False).set_channel(channel_number, "wlan0") + device.blocking(False).set_channel(channel_number, ifaceName) return True ''' diff --git a/openAI_RRM/controller.py b/openAI_RRM/controller.py index c8fc4d0..55137ad 100644 --- a/openAI_RRM/controller.py +++ b/openAI_RRM/controller.py @@ -6,18 +6,42 @@ #parent_dir = os.path.dirname(current_dir) #sys.path.insert(0, parent_dir) from channel_controller import UniflexChannelController +from functools import reduce import gym class Controller(UniFlexController): def __init__(self, **kwargs): super() + self.channel_controller = UniflexChannelController() + self.observationSpace = [] + self.lastObservation = [] return def reset(self): + self.observationSpace = self._create_client_list() + self.actionSpace = self._create_interface_list() + + interfaces = self.channel_controller.get_interfaces() + + # set a start channel for 
each interface: + channel = 1 + for node in interfaces: + for device in node['devices']: + for iface in device['interfaces']: + self.channel_controller.set_channel( + node['uuid'], device['uuid'], iface, channel, None) + channel += 5 + if channel > 12: + channel = 1 + # clear bandwidth counter + self.channel_controller.get_bandwidth() return def execute_action(self, action): + for index, actionStep in action: + interface = self.actionSpace[index] + self.channel_controller.set_channel(interface['node'], interface['device'], interface['iface'], actionStep, None) return def render(): @@ -30,10 +54,50 @@ def get_actionSpace(self): return def get_observation(self): - return + observation = [] + bandwidth = self.channel_controller.get_bandwidth() + bandwidth = sorted(bandwidth, key=lambda k: k['mac']) + for client in self.observationSpace: + bandwidth = self. _get_bandwidth_by_client( bandwidth, client) + if bandwidth in None: + bandwidth = 0 + observation.append(bandwidth) + + self.lastObservation = observation + return observation + # game over if there is a new interface def get_gameOver(self): - return + clients = self._create_client_list() + return len(set(clients).symmetric_difference(set(self.observationSpace))) == 0 def get_reward(self): - return + if len(self.lastObservation) > 0: + return reduce(lambda x, y: x^2 + y, self.lastObservation) + return 0 + + + + def _get_bandwidth_by_client(self, bandwidthList, clientData): + for client in bandwidthList: + if (client['mac'] is clientData['mac']) and (client['node'] is clientData['node']) and (client['device'] is clientData['device']) and (client['iface'] is clientData['iface']): + return client['bandwidth'] + return None + + def _create_client_list(self): + clientList = [] + clients = self.channel_controller.get_bandwidth() + for client in clients: + clientList.append({'mac': client['mac'], 'node': client['node']['uuid'], + 'device': client['device']['uuid'], 'iface': client['interface']}) + clients = 
sorted(clients, key=lambda k: k['mac']) + return clientList + + def _create_interface_list(self): + interfaceList = [] + interfaces = self.channel_controller.get_interfaces() + for node in interfaces: + for device in node['devices']: + for iface in device['interfaces']: + interfaceList.append({'node': node['uuid'], 'device': device['uuid'], 'iface': iface}) + return interfaceList diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index c5ce951..bdbfd6f 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -13,6 +13,13 @@ env = gym.make('uniflex-v0') env.configure(steptime=10) env.start_controller() +env.reset() + +n = 0 + +while True: + env.step([]) + print ("next step") ''' ob_space = env.observation_space From ff26c1e917699d5082f5ac2f3dd327d36473fa71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Wed, 5 Dec 2018 11:57:09 +0100 Subject: [PATCH 06/54] one uniflex controller --- openAI_RRM/channel_controller.py | 101 +++++++++++++++++++++++++++++- openAI_RRM/config_master.yaml | 1 + openAI_RRM/controller.py | 103 ------------------------------- openAI_RRM/rrm_agent.py | 4 +- 4 files changed, 103 insertions(+), 106 deletions(-) delete mode 100644 openAI_RRM/controller.py diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index 90dbc2a..cd8fbdb 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -9,6 +9,8 @@ from common import AveragedSpectrumScanSampleEvent from common import ChangeWindowSizeEvent +from UniFlexGym.interfaces.uniflex_controller import UniFlexController + __author__ = "Piotr Gawlowicz, Sascha Rösler" __copyright__ = "Copyright (c) 2016, Technische Universität Berlin" __version__ = "0.1.0" @@ -20,7 +22,7 @@ def __init__(self): super().__init__() -class UniflexChannelController(modules.ControlApplication): +class UniflexChannelController(modules.ControlApplication, UniFlexController): def __init__(self,**kwargs): 
super(UniflexChannelController, self).__init__() self.log = logging.getLogger('ChannelController') @@ -333,7 +335,104 @@ def periodic_evaluation(self, event): self.channel += 1 if self.channel > 13: self.channel = 1 + + ''' + OpenAI Gym Uniflex env API + ''' + + def __init__(self, **kwargs): + super() + self.observationSpace = [] + self.lastObservation = [] + return + + def reset(self): + self.observationSpace = self._create_client_list() + self.actionSpace = self._create_interface_list() + + interfaces = self.get_interfaces() + + # set a start channel for each interface: + channel = 1 + for node in interfaces: + for device in node['devices']: + for iface in device['interfaces']: + self.set_channel( + node['uuid'], device['uuid'], iface, channel, None) + channel += 5 + if channel > 12: + channel = 1 + # clear bandwidth counter + self.get_bandwidth() + return + + def execute_action(self, action): + for index, actionStep in action: + interface = self.actionSpace[index] + self.set_channel(interface['node'], interface['device'], interface['iface'], actionStep, None) + return + + def render(): + return + + def get_observationSpace(self): + return + + def get_actionSpace(self): + return + + def get_observation(self): + observation = [] + bandwidth = self.get_bandwidth() + bandwidth = sorted(bandwidth, key=lambda k: k['mac']) + for client in self.observationSpace: + bandwidth = self. 
_get_bandwidth_by_client( bandwidth, client) + if bandwidth in None: + bandwidth = 0 + observation.append(bandwidth) + self.lastObservation = observation + return observation + + # game over if there is a new interface + def get_gameOver(self): + clients = self._create_client_list() + return len(set(clients).symmetric_difference(set(self.observationSpace))) == 0 + + def get_reward(self): + if len(self.lastObservation) > 0: + return reduce(lambda x, y: x^2 + y, self.lastObservation) + return 0 + + + + def _get_bandwidth_by_client(self, bandwidthList, clientData): + for client in bandwidthList: + if (client['mac'] is clientData['mac']) and (client['node'] is clientData['node']) and (client['device'] is clientData['device']) and (client['iface'] is clientData['iface']): + return client['bandwidth'] + return None + + def _create_client_list(self): + clientList = [] + clients = self.get_bandwidth() + for client in clients: + clientList.append({'mac': client['mac'], 'node': client['node']['uuid'], + 'device': client['device']['uuid'], 'iface': client['interface']}) + clients = sorted(clients, key=lambda k: k['mac']) + return clientList + + def _create_interface_list(self): + interfaceList = [] + interfaces = self.get_interfaces() + for node in interfaces: + for device in node['devices']: + for iface in device['interfaces']: + interfaceList.append({'node': node['uuid'], 'device': device['uuid'], 'iface': iface}) + return interfaceList + + + + ''' print(self.get_bandwidth()) diff --git a/openAI_RRM/config_master.yaml b/openAI_RRM/config_master.yaml index 8ca9d48..89c7c68 100644 --- a/openAI_RRM/config_master.yaml +++ b/openAI_RRM/config_master.yaml @@ -15,6 +15,7 @@ control_applications: myController: file : my_control_app.py class_name : OpenAIRRM + openAIGymController: True kwargs : { 'openAI_controller': "openAI_gym.rrm_agent" } diff --git a/openAI_RRM/controller.py b/openAI_RRM/controller.py deleted file mode 100644 index 55137ad..0000000 --- a/openAI_RRM/controller.py 
+++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/env python3 - -from UniFlexGym.interfaces.uniflex_controller import UniFlexController -#import os,sys,inspect -#current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) -#parent_dir = os.path.dirname(current_dir) -#sys.path.insert(0, parent_dir) -from channel_controller import UniflexChannelController -from functools import reduce - -import gym - -class Controller(UniFlexController): - def __init__(self, **kwargs): - super() - self.channel_controller = UniflexChannelController() - self.observationSpace = [] - self.lastObservation = [] - return - - def reset(self): - self.observationSpace = self._create_client_list() - self.actionSpace = self._create_interface_list() - - interfaces = self.channel_controller.get_interfaces() - - # set a start channel for each interface: - channel = 1 - for node in interfaces: - for device in node['devices']: - for iface in device['interfaces']: - self.channel_controller.set_channel( - node['uuid'], device['uuid'], iface, channel, None) - channel += 5 - if channel > 12: - channel = 1 - # clear bandwidth counter - self.channel_controller.get_bandwidth() - return - - def execute_action(self, action): - for index, actionStep in action: - interface = self.actionSpace[index] - self.channel_controller.set_channel(interface['node'], interface['device'], interface['iface'], actionStep, None) - return - - def render(): - return - - def get_observationSpace(self): - return - - def get_actionSpace(self): - return - - def get_observation(self): - observation = [] - bandwidth = self.channel_controller.get_bandwidth() - bandwidth = sorted(bandwidth, key=lambda k: k['mac']) - for client in self.observationSpace: - bandwidth = self. 
_get_bandwidth_by_client( bandwidth, client) - if bandwidth in None: - bandwidth = 0 - observation.append(bandwidth) - - self.lastObservation = observation - return observation - - # game over if there is a new interface - def get_gameOver(self): - clients = self._create_client_list() - return len(set(clients).symmetric_difference(set(self.observationSpace))) == 0 - - def get_reward(self): - if len(self.lastObservation) > 0: - return reduce(lambda x, y: x^2 + y, self.lastObservation) - return 0 - - - - def _get_bandwidth_by_client(self, bandwidthList, clientData): - for client in bandwidthList: - if (client['mac'] is clientData['mac']) and (client['node'] is clientData['node']) and (client['device'] is clientData['device']) and (client['iface'] is clientData['iface']): - return client['bandwidth'] - return None - - def _create_client_list(self): - clientList = [] - clients = self.channel_controller.get_bandwidth() - for client in clients: - clientList.append({'mac': client['mac'], 'node': client['node']['uuid'], - 'device': client['device']['uuid'], 'iface': client['interface']}) - clients = sorted(clients, key=lambda k: k['mac']) - return clientList - - def _create_interface_list(self): - interfaceList = [] - interfaces = self.channel_controller.get_interfaces() - for node in interfaces: - for device in node['devices']: - for iface in device['interfaces']: - interfaceList.append({'node': node['uuid'], 'device': device['uuid'], 'iface': iface}) - return interfaceList diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index bdbfd6f..ec6d27f 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -11,8 +11,8 @@ #create uniflex environment, steptime is 10sec env = gym.make('uniflex-v0') -env.configure(steptime=10) -env.start_controller() +#env.configure() +env.start_controller(steptime=10) env.reset() n = 0 From 0d5542b2d5f81a7bc870e64f179f3d1dbfff4a3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Wed, 5 Dec 2018 
17:02:48 +0100 Subject: [PATCH 07/54] debug controller --- openAI_RRM/channel_controller.py | 14 ++++++-------- openAI_RRM/config_master.yaml | 4 ++-- openAI_RRM/rrm_agent.py | 9 ++++++++- 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index cd8fbdb..c98761f 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -29,12 +29,15 @@ def __init__(self,**kwargs): self.running = False self.timeInterval = 10 - self.timer = TimerEventSender(self, PeriodicEvaluationTimeEvent) - self.timer.start(self.timeInterval) +# self.timer = TimerEventSender(self, PeriodicEvaluationTimeEvent) +# self.timer.start(self.timeInterval) self.packetLossEventsEnabled = False self.channel = 1 + self.observationSpace = [] + self.lastObservation = [] + # if not "openAI_controller" in kwargs: # raise ValueError("There is no OpenAI gym controller specified. Can not #find \"" + "openAI_controller" + "\" as kwargs in the config file.") # else: @@ -47,7 +50,7 @@ def __init__(self,**kwargs): def my_start_function(self): print("start control app") self.running = True - self.openAI_controller.run() +# self.openAI_controller.run() @modules.on_exit() def my_stop_function(self): @@ -340,11 +343,6 @@ def periodic_evaluation(self, event): OpenAI Gym Uniflex env API ''' - def __init__(self, **kwargs): - super() - self.observationSpace = [] - self.lastObservation = [] - return def reset(self): self.observationSpace = self._create_client_list() diff --git a/openAI_RRM/config_master.yaml b/openAI_RRM/config_master.yaml index 89c7c68..3946cb1 100644 --- a/openAI_RRM/config_master.yaml +++ b/openAI_RRM/config_master.yaml @@ -13,8 +13,8 @@ broker: control_applications: myController: - file : my_control_app.py - class_name : OpenAIRRM + file : channel_controller.py + class_name : UniflexChannelController openAIGymController: True kwargs : { 'openAI_controller': "openAI_gym.rrm_agent" diff --git 
a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index ec6d27f..f87fcdb 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -7,12 +7,19 @@ #import tensorflow.contrib.slim as slim import numpy as np #from tensorflow import keras +import argparse +parser = argparse.ArgumentParser(description='Uniflex reader') +parser.add_argument('--config', help='path to the uniflex config file', default=None) +args = parser.parse_args() +if not args.config: + print("No config file specified!") + quit() #create uniflex environment, steptime is 10sec env = gym.make('uniflex-v0') #env.configure() -env.start_controller(steptime=10) +env.start_controller(steptime=10, config=args.config) env.reset() n = 0 From 069730671adfe33eec22277546f77dc943b5c8b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Sun, 9 Dec 2018 20:00:42 +0100 Subject: [PATCH 08/54] debug controller --- openAI_RRM/channel_controller.py | 36 ++++++++++++++++++-------------- openAI_RRM/rrm_agent.py | 2 ++ 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index c98761f..83642ac 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -191,7 +191,7 @@ def get_bandwidth(self): for flow in device.my_control_flow: flow['old'] = True - + print("send getIface!") for interface in device.get_interfaces(): infos = device.get_info_of_connected_devices(interface) @@ -319,7 +319,10 @@ def periodic_evaluation(self, event): if len(self.get_nodes()) == 0: return - + self.reset() + self.execute_action([1]) + print(self.get_observation()) + ''' flows = [] ifaces = self.get_interfaces() @@ -338,6 +341,7 @@ def periodic_evaluation(self, event): self.channel += 1 if self.channel > 13: self.channel = 1 + ''' ''' OpenAI Gym Uniflex env API @@ -352,8 +356,8 @@ def reset(self): # set a start channel for each interface: channel = 1 - for node in interfaces: - for device in node['devices']: + 
for nodeUuid, node in interfaces.items(): + for devUuid, device in node['devices'].items(): for iface in device['interfaces']: self.set_channel( node['uuid'], device['uuid'], iface, channel, None) @@ -365,7 +369,7 @@ def reset(self): return def execute_action(self, action): - for index, actionStep in action: + for index, actionStep in enumerate(action): interface = self.actionSpace[index] self.set_channel(interface['node'], interface['device'], interface['iface'], actionStep, None) return @@ -381,11 +385,11 @@ def get_actionSpace(self): def get_observation(self): observation = [] - bandwidth = self.get_bandwidth() - bandwidth = sorted(bandwidth, key=lambda k: k['mac']) + bandwidthList = self.get_bandwidth() + #bandwidth = sorted(bandwidth, key=lambda k: k['mac']) for client in self.observationSpace: - bandwidth = self. _get_bandwidth_by_client( bandwidth, client) - if bandwidth in None: + bandwidth = self. _get_bandwidth_by_client( bandwidthList, client) + if bandwidth is None: bandwidth = 0 observation.append(bandwidth) @@ -405,25 +409,25 @@ def get_reward(self): def _get_bandwidth_by_client(self, bandwidthList, clientData): - for client in bandwidthList: - if (client['mac'] is clientData['mac']) and (client['node'] is clientData['node']) and (client['device'] is clientData['device']) and (client['iface'] is clientData['iface']): + for mac, client in bandwidthList.items(): + if (mac is clientData['mac']) and (client['node'] is clientData['node']) and (client['device'] is clientData['device']) and (client['iface'] is clientData['iface']): return client['bandwidth'] return None def _create_client_list(self): clientList = [] clients = self.get_bandwidth() - for client in clients: - clientList.append({'mac': client['mac'], 'node': client['node']['uuid'], + for mac, client in clients.items(): + clientList.append({'mac': mac, 'node': client['node']['uuid'], 'device': client['device']['uuid'], 'iface': client['interface']}) - clients = sorted(clients, key=lambda k: 
k['mac']) + clientList = sorted(clientList, key=lambda k: k['mac']) return clientList def _create_interface_list(self): interfaceList = [] interfaces = self.get_interfaces() - for node in interfaces: - for device in node['devices']: + for nodeUuid, node in interfaces.items(): + for devUuid, device in node['devices'].items(): for iface in device['interfaces']: interfaceList.append({'node': node['uuid'], 'device': device['uuid'], 'iface': iface}) return interfaceList diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index f87fcdb..f290097 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -20,7 +20,9 @@ env = gym.make('uniflex-v0') #env.configure() env.start_controller(steptime=10, config=args.config) +print ("before reset") env.reset() +print ("after reset") n = 0 From 3a631079453a734ebdd80cac9cb0856822292326 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Tue, 11 Dec 2018 11:44:40 +0100 Subject: [PATCH 09/54] some debug --- openAI_RRM/channel_controller.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index 83642ac..1e24b77 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -29,8 +29,8 @@ def __init__(self,**kwargs): self.running = False self.timeInterval = 10 -# self.timer = TimerEventSender(self, PeriodicEvaluationTimeEvent) -# self.timer.start(self.timeInterval) + self.timer = TimerEventSender(self, PeriodicEvaluationTimeEvent) + self.timer.start(self.timeInterval) self.packetLossEventsEnabled = False self.channel = 1 From ad7704002dd178b603ca2073e1b1e4a21553f281 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Tue, 11 Dec 2018 11:45:03 +0100 Subject: [PATCH 10/54] switch of timer --- openAI_RRM/channel_controller.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index 
1e24b77..83642ac 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -29,8 +29,8 @@ def __init__(self,**kwargs): self.running = False self.timeInterval = 10 - self.timer = TimerEventSender(self, PeriodicEvaluationTimeEvent) - self.timer.start(self.timeInterval) +# self.timer = TimerEventSender(self, PeriodicEvaluationTimeEvent) +# self.timer.start(self.timeInterval) self.packetLossEventsEnabled = False self.channel = 1 From a6c476870da1e27735674f4742f629ba6875a430 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Wed, 12 Dec 2018 19:40:45 +0100 Subject: [PATCH 11/54] debug code --- openAI_RRM/channel_controller.py | 8 ++++++-- openAI_RRM/rrm_agent.py | 17 ++++++++++++----- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index 83642ac..d231d9d 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -2,6 +2,8 @@ import datetime import random +from functools import reduce + from sbi.radio_device.events import PacketLossEvent from uniflex.core import modules from uniflex.core import events @@ -191,7 +193,7 @@ def get_bandwidth(self): for flow in device.my_control_flow: flow['old'] = True - print("send getIface!") + for interface in device.get_interfaces(): infos = device.get_info_of_connected_devices(interface) @@ -399,7 +401,9 @@ def get_observation(self): # game over if there is a new interface def get_gameOver(self): clients = self._create_client_list() - return len(set(clients).symmetric_difference(set(self.observationSpace))) == 0 + clientHash = [i['mac'] + i['node'] + i['device'] + i['iface'] for i in clients] + observationSpaceHash = [i['mac'] + i['node'] + i['device'] + i['iface'] for i in self.observationSpace] + return not len(set(clientHash).symmetric_difference(set(observationSpaceHash))) == 0 def get_reward(self): if len(self.lastObservation) > 0: diff --git a/openAI_RRM/rrm_agent.py 
b/openAI_RRM/rrm_agent.py index f290097..26b1657 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -8,6 +8,8 @@ import numpy as np #from tensorflow import keras import argparse +import logging + parser = argparse.ArgumentParser(description='Uniflex reader') parser.add_argument('--config', help='path to the uniflex config file', default=None) @@ -20,15 +22,20 @@ env = gym.make('uniflex-v0') #env.configure() env.start_controller(steptime=10, config=args.config) -print ("before reset") -env.reset() -print ("after reset") + n = 0 while True: - env.step([]) - print ("next step") + print ("reset") + env.reset() + gameover = False + + while not gameover: + ob, reward, gameover, info = env.step([]) + print (ob) + print (gameover) + print ("next step") ''' ob_space = env.observation_space From ae61b13beaf321ce7bb6d644341383c8f5b58e40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Sun, 23 Dec 2018 15:47:43 +0100 Subject: [PATCH 12/54] multi dim controller, 1client agent --- openAI_RRM/channel_controller.py | 27 +++++++---- openAI_RRM/readme.txt | 5 +- openAI_RRM/rrm_agent.py | 80 ++++++++++++++++++++++++++++---- 3 files changed, 95 insertions(+), 17 deletions(-) diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index d231d9d..64572d3 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -1,6 +1,7 @@ import logging import datetime import random +import numpy from functools import reduce @@ -11,6 +12,8 @@ from common import AveragedSpectrumScanSampleEvent from common import ChangeWindowSizeEvent +from gym import spaces + from UniFlexGym.interfaces.uniflex_controller import UniFlexController __author__ = "Piotr Gawlowicz, Sascha Rösler" @@ -371,26 +374,34 @@ def reset(self): return def execute_action(self, action): - for index, actionStep in enumerate(action): - interface = self.actionSpace[index] - self.set_channel(interface['node'], interface['device'], 
interface['iface'], actionStep, None) + try: + for index, actionStep in enumerate(action): + interface = self.actionSpace[index] + self.set_channel(interface['node'], interface['device'], interface['iface'], actionStep*4+1, None) + except TypeError: + interface = self.actionSpace[0] + self.set_channel(interface['node'], interface['device'], interface['iface'], action*4+1, None) return def render(): return def get_observationSpace(self): - return + return spaces.Box(low=0, high=10000000, shape=(len(self.observationSpace),), dtype=numpy.float32) def get_actionSpace(self): - return + maxValues = [4 for i in self.actionSpace] + return spaces.MultiDiscrete(maxValues) def get_observation(self): + # for simulation + self.simulate_flows() + observation = [] bandwidthList = self.get_bandwidth() #bandwidth = sorted(bandwidth, key=lambda k: k['mac']) for client in self.observationSpace: - bandwidth = self. _get_bandwidth_by_client( bandwidthList, client) + bandwidth = self._get_bandwidth_by_client( bandwidthList, client) if bandwidth is None: bandwidth = 0 observation.append(bandwidth) @@ -407,14 +418,14 @@ def get_gameOver(self): def get_reward(self): if len(self.lastObservation) > 0: - return reduce(lambda x, y: x^2 + y, self.lastObservation) + return reduce(lambda x, y: x**2 + y, self.lastObservation) return 0 def _get_bandwidth_by_client(self, bandwidthList, clientData): for mac, client in bandwidthList.items(): - if (mac is clientData['mac']) and (client['node'] is clientData['node']) and (client['device'] is clientData['device']) and (client['iface'] is clientData['iface']): + if (mac == clientData['mac']) and (client['node']['uuid'] == clientData['node']) and (client['device']['uuid'] == clientData['device']) and (client['interface'] == clientData['iface']): return client['bandwidth'] return None diff --git a/openAI_RRM/readme.txt b/openAI_RRM/readme.txt index cbbfb5a..a99904a 100644 --- a/openAI_RRM/readme.txt +++ b/openAI_RRM/readme.txt @@ -1,11 +1,14 @@ # 1. 
Run control program and all modules on local node uniflex-agent --config ./config_local.yaml +source ~/Uniflex/dev/bin/activate + # 2a. Run control program in master node: uniflex-broker # 2b. Run control program in master node: -uniflex-agent --config ./config_master.yaml +python3 rrm_agent.py --config ./config_master.yaml # 2c. Run modules in slave node: uniflex-agent --config ./config_slave.yaml +uniflex-agent --config ./config_slave2.yaml # For debugging mode run with -v option diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index 26b1657..d082031 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -3,12 +3,13 @@ import gym import UniFlexGym -#import tensorflow as tf -#import tensorflow.contrib.slim as slim +import tensorflow as tf +import tensorflow.contrib.slim as slim import numpy as np -#from tensorflow import keras +from tensorflow import keras import argparse import logging +import time parser = argparse.ArgumentParser(description='Uniflex reader') @@ -21,21 +22,84 @@ #create uniflex environment, steptime is 10sec env = gym.make('uniflex-v0') #env.configure() -env.start_controller(steptime=10, config=args.config) +env.start_controller(steptime=1, config=args.config) n = 0 while True: print ("reset") - env.reset() + state = env.reset() gameover = False + ''' + code (c) piotr + ''' + ob_space = env.observation_space + ac_space = env.action_space + + + print("Observation space: ", ob_space, ob_space.dtype) + print("Action space: ", ac_space, ac_space.nvec) + + s_size = ob_space.shape[0] + a_size = ac_space.nvec + if(s_size < 1 or len(a_size) < 1): + print("No client registered - retry") + continue + state = np.reshape(state, [1, s_size]) + model = keras.Sequential() + model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) + model.add(keras.layers.Dense(a_size, activation='softmax')) + model.compile(optimizer=tf.train.AdamOptimizer(0.001), + loss='categorical_crossentropy', + metrics=['accuracy']) + 
epsilon = 1.0 # exploration rate + epsilon_min = 0.01 + epsilon_decay = 0.999 + rewardsum = 0 + time_history = [] + rew_history = [] + e = 0 + while not gameover: - ob, reward, gameover, info = env.step([]) - print (ob) - print (gameover) + # Choose action + if np.random.rand(1) < epsilon: + action = np.random.randint(a_size) + else: + action = np.argmax(model.predict(state)[0]) + + # Step + next_state, reward, gameover, _ = env.step(action) + + if gameover: + print("episode: {}, rew: {}, eps: {:.2}" + .format(e, rewardsum, epsilon)) + break + + next_state = np.reshape(next_state, [1, s_size]) + + # Train + target = reward + if not gameover: + target = (reward + 0.95 * np.amax(model.predict(next_state)[0])) + + target_f = model.predict(state) + target_f[0][action] = target + model.fit(state, target_f, epochs=1, verbose=0) + + state = next_state + rewardsum += reward + if epsilon > epsilon_min: epsilon *= epsilon_decay + + rew_history.append(rewardsum) + print ("Bandwidth: " + str(next_state)) + print ("Reward: " + str(reward)) + print ("GameOver: " + str(gameover)) + print ("Channel selection:" + str(action)) print ("next step") + + e += 1 ''' ob_space = env.observation_space From f62be9f537a4813f033de1f6e943f792fc98febc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Wed, 6 Mar 2019 13:18:07 +0100 Subject: [PATCH 13/54] debug agent and controller --- openAI_RRM/channel_controller.py | 76 ++++++++++++--------- openAI_RRM/config_slave.yaml | 4 +- openAI_RRM/config_slave2.yaml | 4 +- openAI_RRM/rrm_agent.py | 109 ++++++++++++++++++++----------- 4 files changed, 121 insertions(+), 72 deletions(-) diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index 64572d3..aac7d58 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -2,6 +2,7 @@ import datetime import random import numpy +from math import * from functools import reduce @@ -21,6 +22,7 @@ __version__ = "0.1.0" __email__ = 
"{gawlowicz}@tkn.tu-berlin.de, s.resler@campus.tu-berlin.de" +numChannels = 2 class PeriodicEvaluationTimeEvent(events.TimeEvent): def __init__(self): @@ -41,7 +43,9 @@ def __init__(self,**kwargs): self.channel = 1 self.observationSpace = [] + self.registeredClients = self._create_client_list() self.lastObservation = [] + self.actionSet = [] # if not "openAI_controller" in kwargs: # raise ValueError("There is no OpenAI gym controller specified. Can not #find \"" + "openAI_controller" + "\" as kwargs in the config file.") @@ -305,9 +309,9 @@ def simulate_flows(self): chnum = device.get_channel(interface) chw = device.get_channel_width(interface) infos = device.get_info_of_connected_devices(interface) + mac = device.get_address() - for mac in infos: - flows.append({'mac address' : mac, 'channel number' : chnum, 'channel width' : chw, 'iface': interface}) + flows.append({'mac address' : mac, 'channel number' : chnum, 'channel width' : chw, 'iface': interface}) for node in self.get_nodes(): for device in node.get_devices(): @@ -354,8 +358,13 @@ def periodic_evaluation(self, event): def reset(self): - self.observationSpace = self._create_client_list() - self.actionSpace = self._create_interface_list() + self.registeredClients = self._create_client_list() + self.observationSpace = self.get_observationSpace() + self.actionSpace = self.get_actionSpace() + self.actionSet = [] + + #for index in range(actionSpace): + interfaces = self.get_interfaces() @@ -370,56 +379,61 @@ def reset(self): if channel > 12: channel = 1 # clear bandwidth counter + self.simulate_flows() self.get_bandwidth() return def execute_action(self, action): - try: - for index, actionStep in enumerate(action): - interface = self.actionSpace[index] - self.set_channel(interface['node'], interface['device'], interface['iface'], actionStep*4+1, None) - except TypeError: - interface = self.actionSpace[0] - self.set_channel(interface['node'], interface['device'], interface['iface'], action*4+1, None) + for 
index, interface in enumerate(self._create_interface_list()): + ifaceaction = int(action / (pow(numChannels,index))) + ifaceaction = ifaceaction % numChannels + self.set_channel(interface['node'], interface['device'], interface['iface'], ifaceaction*numChannels+1, None) + #try: + # for index, actionStep in enumerate(action): + # interface = self.actionSpace[index] + # self.set_channel(interface['node'], interface['device'], interface['iface'], actionStep*4+1, None) + #except TypeError: + # interface = self.actionSpace[0] + # self.set_channel(interface['node'], interface['device'], interface['iface'], action*4+1, None) return def render(): return def get_observationSpace(self): - return spaces.Box(low=0, high=10000000, shape=(len(self.observationSpace),), dtype=numpy.float32) + maxValues = [numChannels for i in self._create_interface_list()] + #return spaces.Box(low=0, high=numChannels, shape=(len(self._create_interface_list()),0), dtype=numpy.float32) + return spaces.MultiDiscrete(maxValues) + #spaces.Box(low=0, high=10000000, shape=(len(self.observationSpace),), dtype=numpy.float32) def get_actionSpace(self): - maxValues = [4 for i in self.actionSpace] - return spaces.MultiDiscrete(maxValues) + if len(self._create_interface_list()) == 0: + return spaces.Discrete(0) + return spaces.Discrete(pow(numChannels, len(self._create_interface_list()))) def get_observation(self): - # for simulation - self.simulate_flows() - - observation = [] - bandwidthList = self.get_bandwidth() - #bandwidth = sorted(bandwidth, key=lambda k: k['mac']) - for client in self.observationSpace: - bandwidth = self._get_bandwidth_by_client( bandwidthList, client) - if bandwidth is None: - bandwidth = 0 - observation.append(bandwidth) - - self.lastObservation = observation + channels = self.get_channels() + observation = list(map(lambda x: (x['channel number']-1) / numChannels, channels)) return observation # game over if there is a new interface def get_gameOver(self): clients = 
self._create_client_list() clientHash = [i['mac'] + i['node'] + i['device'] + i['iface'] for i in clients] - observationSpaceHash = [i['mac'] + i['node'] + i['device'] + i['iface'] for i in self.observationSpace] + observationSpaceHash = [i['mac'] + i['node'] + i['device'] + i['iface'] for i in self.registeredClients] return not len(set(clientHash).symmetric_difference(set(observationSpaceHash))) == 0 def get_reward(self): - if len(self.lastObservation) > 0: - return reduce(lambda x, y: x**2 + y, self.lastObservation) - return 0 + # for simulation + self.simulate_flows() + + bandwidthList = self.get_bandwidth() + #bandwidth = sorted(bandwidth, key=lambda k: k['mac']) + reward = 0 + for key in bandwidthList: + item = bandwidthList[key] + reward += sqrt(item['bandwidth']) + return reward diff --git a/openAI_RRM/config_slave.yaml b/openAI_RRM/config_slave.yaml index 4ce221e..c75b554 100644 --- a/openAI_RRM/config_slave.yaml +++ b/openAI_RRM/config_slave.yaml @@ -21,5 +21,7 @@ modules: module : uniflex_module_simple class_name : SimpleModule4 devices : ['phy0'] - kwargs : { "MAC_List" : ["38:10:d5:d7:54:82", "38:10:d5:d7:59:23"]} + kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02"], + 'neighbors' : ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], + 'myMAC' : 'aa:aa:aa:aa:aa:01'} diff --git a/openAI_RRM/config_slave2.yaml b/openAI_RRM/config_slave2.yaml index 21c58dc..b70173d 100644 --- a/openAI_RRM/config_slave2.yaml +++ b/openAI_RRM/config_slave2.yaml @@ -21,5 +21,7 @@ modules: module : uniflex_module_simple class_name : SimpleModule4 devices : ['phy0'] - kwargs : { "MAC_List" : ["38:10:d5:9a:0b:60"]} + kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12"], + 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], + 'myMAC' : 'aa:aa:aa:aa:aa:02'} diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index d082031..3851ec8 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -10,6 +10,8 @@ import argparse import logging 
import time +import matplotlib.pyplot as plt +from math import * parser = argparse.ArgumentParser(description='Uniflex reader') @@ -24,82 +26,111 @@ #env.configure() env.start_controller(steptime=1, config=args.config) +epsilon = 1.0 # exploration rate +epsilon_min = 0.01 +epsilon_decay = 0.99 + +time_history = [] +rew_history = [] -n = 0 +numChannels = 2 +episode = 0 while True: - print ("reset") - state = env.reset() - gameover = False + run = 0 + rewards = [] + actions = [] - ''' - code (c) piotr - ''' - ob_space = env.observation_space + state = env.reset() + n = 0 ac_space = env.action_space - - + ob_space = env.observation_space print("Observation space: ", ob_space, ob_space.dtype) - print("Action space: ", ac_space, ac_space.nvec) - + print("Action space: ", ac_space, ac_space.n) + s_size = ob_space.shape[0] - a_size = ac_space.nvec - if(s_size < 1 or len(a_size) < 1): - print("No client registered - retry") - continue - state = np.reshape(state, [1, s_size]) + a_size = ac_space.n model = keras.Sequential() model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) + model.add(keras.layers.Dense(5, activation='relu')) model.add(keras.layers.Dense(a_size, activation='softmax')) model.compile(optimizer=tf.train.AdamOptimizer(0.001), loss='categorical_crossentropy', metrics=['accuracy']) - epsilon = 1.0 # exploration rate - epsilon_min = 0.01 - epsilon_decay = 0.999 + + state = np.reshape(state, [1, s_size]) rewardsum = 0 - time_history = [] - rew_history = [] - e = 0 + done = False + + if a_size == 0: + print("there is no vaild AP - sleep 10 seconds") + time.sleep(2) + continue + + aps = int(log(a_size, numChannels)) + + for i in range(0, aps): + actions.append([]) - while not gameover: + while not done: # Choose action + #epsilon = 1 if np.random.rand(1) < epsilon: action = np.random.randint(a_size) else: action = np.argmax(model.predict(state)[0]) - + # Step - next_state, reward, gameover, _ = env.step(action) + next_state, reward, 
done, _ = env.step(action) - if gameover: - print("episode: {}, rew: {}, eps: {:.2}" - .format(e, rewardsum, epsilon)) + reward /= 1000 + + if done: + # print("episode: {}/{}, time: {}, rew: {}, eps: {:.2}" + # .format(e, total_episodes, time, rewardsum, epsilon)) break - + next_state = np.reshape(next_state, [1, s_size]) - + # Train target = reward - if not gameover: - target = (reward + 0.95 * np.amax(model.predict(next_state)[0])) + if not done: + target = (reward)# + 0.95 * np.amax(model.predict(next_state)[0])) + print(target) + target_f = model.predict(state) target_f[0][action] = target model.fit(state, target_f, epochs=1, verbose=0) - + state = next_state - rewardsum += reward + #rewardsum += reward if epsilon > epsilon_min: epsilon *= epsilon_decay - rew_history.append(rewardsum) - print ("Bandwidth: " + str(next_state)) + rewards.append(reward) + + for ap in range(0, aps): + ifaceaction = int(action / (pow(numChannels, ap))) + ifaceaction = ifaceaction % numChannels + actions[ap].append(ifaceaction) + print ("Reward: " + str(reward)) - print ("GameOver: " + str(gameover)) + print ("GameOver: " + str(done)) + print ("Next Channels: " + str(next_state)) print ("Channel selection:" + str(action)) print ("next step") - e += 1 + plt.subplot(211) + plt.plot(rewards) + plt.subplot(212) + for ap in range(0, aps): + plt.plot(actions[ap]) + plt.pause(0.05) + + run += 1 + + episode += 1 + ''' ob_space = env.observation_space From 16455d927712556ba763a1783bb16c74e0919401 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Thu, 7 Mar 2019 22:14:57 +0100 Subject: [PATCH 14/54] simulation to config --- openAI_RRM/config_slave.yaml | 3 ++- openAI_RRM/config_slave2.yaml | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/openAI_RRM/config_slave.yaml b/openAI_RRM/config_slave.yaml index c75b554..7c07ba5 100644 --- a/openAI_RRM/config_slave.yaml +++ b/openAI_RRM/config_slave.yaml @@ -23,5 +23,6 @@ modules: devices : ['phy0'] kwargs : { 
"clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02"], 'neighbors' : ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], - 'myMAC' : 'aa:aa:aa:aa:aa:01'} + 'myMAC' : 'aa:aa:aa:aa:aa:01', + 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54e6, 'txBytesRandom':0.2}} diff --git a/openAI_RRM/config_slave2.yaml b/openAI_RRM/config_slave2.yaml index b70173d..7a3f9ce 100644 --- a/openAI_RRM/config_slave2.yaml +++ b/openAI_RRM/config_slave2.yaml @@ -23,5 +23,6 @@ modules: devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12"], 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], - 'myMAC' : 'aa:aa:aa:aa:aa:02'} + 'myMAC' : 'aa:aa:aa:aa:aa:02', + 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54e6, 'txBytesRandom':0.2}} From b47d62913acdf026f6e46a2cfae03358234deab0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Fri, 8 Mar 2019 20:28:36 +0100 Subject: [PATCH 15/54] change controller iface --- openAI_RRM/channel_controller.py | 7 ++++--- openAI_RRM/config_master.yaml | 3 ++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index aac7d58..9ab5e47 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -22,8 +22,6 @@ __version__ = "0.1.0" __email__ = "{gawlowicz}@tkn.tu-berlin.de, s.resler@campus.tu-berlin.de" -numChannels = 2 - class PeriodicEvaluationTimeEvent(events.TimeEvent): def __init__(self): super().__init__() @@ -41,12 +39,15 @@ def __init__(self,**kwargs): self.packetLossEventsEnabled = False self.channel = 1 - + self.numChannels = 2 self.observationSpace = [] self.registeredClients = self._create_client_list() self.lastObservation = [] self.actionSet = [] + if 'numChannels' in kwargs: + self.numChannels = kwargs['numChannels'] + # if not "openAI_controller" in kwargs: # raise ValueError("There is no OpenAI gym controller specified. 
Can not #find \"" + "openAI_controller" + "\" as kwargs in the config file.") # else: diff --git a/openAI_RRM/config_master.yaml b/openAI_RRM/config_master.yaml index 3946cb1..119b8f7 100644 --- a/openAI_RRM/config_master.yaml +++ b/openAI_RRM/config_master.yaml @@ -17,7 +17,8 @@ control_applications: class_name : UniflexChannelController openAIGymController: True kwargs : { - 'openAI_controller': "openAI_gym.rrm_agent" + 'openAI_controller': "openAI_gym.rrm_agent", + 'numChannels' : 2 } modules: From ed51455c3830485e91a4e0e8956bd2fc7938a952 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Sun, 10 Mar 2019 23:09:45 +0100 Subject: [PATCH 16/54] check scenarios --- openAI_RRM/Setting2_2/config_slave.yaml | 28 ++++++++++++++++++++++ openAI_RRM/Setting2_2/config_slave2.yaml | 28 ++++++++++++++++++++++ openAI_RRM/Setting2_2/readme.txt | 14 +++++++++++ openAI_RRM/Setting3_112/config_slave.yaml | 28 ++++++++++++++++++++++ openAI_RRM/Setting3_112/config_slave2.yaml | 28 ++++++++++++++++++++++ openAI_RRM/Setting3_112/config_slave3.yaml | 28 ++++++++++++++++++++++ openAI_RRM/Setting3_112/readme.txt | 15 ++++++++++++ openAI_RRM/Setting3_222/config_slave.yaml | 28 ++++++++++++++++++++++ openAI_RRM/Setting3_222/config_slave2.yaml | 28 ++++++++++++++++++++++ openAI_RRM/Setting3_222/config_slave3.yaml | 28 ++++++++++++++++++++++ openAI_RRM/Setting3_222/readme.txt | 15 ++++++++++++ openAI_RRM/channel_controller.py | 12 +++++----- openAI_RRM/config_master.yaml | 1 - openAI_RRM/config_slave.yaml | 4 ++-- openAI_RRM/config_slave2.yaml | 2 +- openAI_RRM/config_slave3.yaml | 28 ++++++++++++++++++++++ openAI_RRM/readme.txt | 1 + openAI_RRM/rrm_agent.py | 14 +++++++---- 18 files changed, 316 insertions(+), 14 deletions(-) create mode 100644 openAI_RRM/Setting2_2/config_slave.yaml create mode 100644 openAI_RRM/Setting2_2/config_slave2.yaml create mode 100644 openAI_RRM/Setting2_2/readme.txt create mode 100644 openAI_RRM/Setting3_112/config_slave.yaml create mode 
100644 openAI_RRM/Setting3_112/config_slave2.yaml create mode 100644 openAI_RRM/Setting3_112/config_slave3.yaml create mode 100644 openAI_RRM/Setting3_112/readme.txt create mode 100644 openAI_RRM/Setting3_222/config_slave.yaml create mode 100644 openAI_RRM/Setting3_222/config_slave2.yaml create mode 100644 openAI_RRM/Setting3_222/config_slave3.yaml create mode 100644 openAI_RRM/Setting3_222/readme.txt create mode 100644 openAI_RRM/config_slave3.yaml diff --git a/openAI_RRM/Setting2_2/config_slave.yaml b/openAI_RRM/Setting2_2/config_slave.yaml new file mode 100644 index 0000000..6207a48 --- /dev/null +++ b/openAI_RRM/Setting2_2/config_slave.yaml @@ -0,0 +1,28 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02"], + 'neighbors' : ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], + 'myMAC' : 'aa:aa:aa:aa:aa:01', + 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} + diff --git a/openAI_RRM/Setting2_2/config_slave2.yaml b/openAI_RRM/Setting2_2/config_slave2.yaml new file mode 100644 index 0000000..9073205 --- /dev/null +++ b/openAI_RRM/Setting2_2/config_slave2.yaml @@ -0,0 +1,28 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: 
+ simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12"], + 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], + 'myMAC' : 'aa:aa:aa:aa:aa:02', + 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} + diff --git a/openAI_RRM/Setting2_2/readme.txt b/openAI_RRM/Setting2_2/readme.txt new file mode 100644 index 0000000..a99904a --- /dev/null +++ b/openAI_RRM/Setting2_2/readme.txt @@ -0,0 +1,14 @@ +# 1. Run control program and all modules on local node +uniflex-agent --config ./config_local.yaml + +source ~/Uniflex/dev/bin/activate + +# 2a. Run control program in master node: +uniflex-broker +# 2b. Run control program in master node: +python3 rrm_agent.py --config ./config_master.yaml +# 2c. Run modules in slave node: +uniflex-agent --config ./config_slave.yaml +uniflex-agent --config ./config_slave2.yaml + +# For debugging mode run with -v option diff --git a/openAI_RRM/Setting3_112/config_slave.yaml b/openAI_RRM/Setting3_112/config_slave.yaml new file mode 100644 index 0000000..e23a542 --- /dev/null +++ b/openAI_RRM/Setting3_112/config_slave.yaml @@ -0,0 +1,28 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:cc:01"], + 'neighbors' : ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], + 'myMAC' : 'aa:aa:aa:aa:aa:01', + 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} + diff --git a/openAI_RRM/Setting3_112/config_slave2.yaml 
b/openAI_RRM/Setting3_112/config_slave2.yaml new file mode 100644 index 0000000..2ba919f --- /dev/null +++ b/openAI_RRM/Setting3_112/config_slave2.yaml @@ -0,0 +1,28 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:cc:11"], + 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], + 'myMAC' : 'aa:aa:aa:aa:aa:02', + 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} + diff --git a/openAI_RRM/Setting3_112/config_slave3.yaml b/openAI_RRM/Setting3_112/config_slave3.yaml new file mode 100644 index 0000000..5ddf209 --- /dev/null +++ b/openAI_RRM/Setting3_112/config_slave3.yaml @@ -0,0 +1,28 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22"], + 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:02'], + 'myMAC' : 'aa:aa:aa:aa:aa:03', + 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} + diff --git a/openAI_RRM/Setting3_112/readme.txt b/openAI_RRM/Setting3_112/readme.txt new file mode 100644 index 0000000..5a22888 --- /dev/null +++ 
b/openAI_RRM/Setting3_112/readme.txt @@ -0,0 +1,15 @@ +# 1. Run control program and all modules on local node +uniflex-agent --config ./config_local.yaml + +source ~/Uniflex/dev/bin/activate + +# 2a. Run control program in master node: +uniflex-broker +# 2b. Run control program in master node: +python3 rrm_agent.py --config ./config_master.yaml +# 2c. Run modules in slave node: +uniflex-agent --config ./config_slave.yaml +uniflex-agent --config ./config_slave2.yaml +uniflex-agent --config ./config_slave3.yaml + +# For debugging mode run with -v option diff --git a/openAI_RRM/Setting3_222/config_slave.yaml b/openAI_RRM/Setting3_222/config_slave.yaml new file mode 100644 index 0000000..8827d13 --- /dev/null +++ b/openAI_RRM/Setting3_222/config_slave.yaml @@ -0,0 +1,28 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02"], + 'neighbors' : ['aa:aa:aa:aa:aa:02'], + 'myMAC' : 'aa:aa:aa:aa:aa:01', + 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} + diff --git a/openAI_RRM/Setting3_222/config_slave2.yaml b/openAI_RRM/Setting3_222/config_slave2.yaml new file mode 100644 index 0000000..9073205 --- /dev/null +++ b/openAI_RRM/Setting3_222/config_slave2.yaml @@ -0,0 +1,28 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + 
class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12"], + 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], + 'myMAC' : 'aa:aa:aa:aa:aa:02', + 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} + diff --git a/openAI_RRM/Setting3_222/config_slave3.yaml b/openAI_RRM/Setting3_222/config_slave3.yaml new file mode 100644 index 0000000..43b0cce --- /dev/null +++ b/openAI_RRM/Setting3_222/config_slave3.yaml @@ -0,0 +1,28 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22"], + 'neighbors' : ['aa:aa:aa:aa:aa:02'], + 'myMAC' : 'aa:aa:aa:aa:aa:03', + 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} + diff --git a/openAI_RRM/Setting3_222/readme.txt b/openAI_RRM/Setting3_222/readme.txt new file mode 100644 index 0000000..5a22888 --- /dev/null +++ b/openAI_RRM/Setting3_222/readme.txt @@ -0,0 +1,15 @@ +# 1. Run control program and all modules on local node +uniflex-agent --config ./config_local.yaml + +source ~/Uniflex/dev/bin/activate + +# 2a. Run control program in master node: +uniflex-broker +# 2b. Run control program in master node: +python3 rrm_agent.py --config ./config_master.yaml +# 2c. 
Run modules in slave node: +uniflex-agent --config ./config_slave.yaml +uniflex-agent --config ./config_slave2.yaml +uniflex-agent --config ./config_slave3.yaml + +# For debugging mode run with -v option diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index 9ab5e47..dcff179 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -386,9 +386,9 @@ def reset(self): def execute_action(self, action): for index, interface in enumerate(self._create_interface_list()): - ifaceaction = int(action / (pow(numChannels,index))) - ifaceaction = ifaceaction % numChannels - self.set_channel(interface['node'], interface['device'], interface['iface'], ifaceaction*numChannels+1, None) + ifaceaction = int(action / (pow(self.numChannels,index))) + ifaceaction = ifaceaction % self.numChannels + self.set_channel(interface['node'], interface['device'], interface['iface'], ifaceaction*self.numChannels+1, None) #try: # for index, actionStep in enumerate(action): # interface = self.actionSpace[index] @@ -402,7 +402,7 @@ def render(): return def get_observationSpace(self): - maxValues = [numChannels for i in self._create_interface_list()] + maxValues = [self.numChannels for i in self._create_interface_list()] #return spaces.Box(low=0, high=numChannels, shape=(len(self._create_interface_list()),0), dtype=numpy.float32) return spaces.MultiDiscrete(maxValues) #spaces.Box(low=0, high=10000000, shape=(len(self.observationSpace),), dtype=numpy.float32) @@ -410,11 +410,11 @@ def get_observationSpace(self): def get_actionSpace(self): if len(self._create_interface_list()) == 0: return spaces.Discrete(0) - return spaces.Discrete(pow(numChannels, len(self._create_interface_list()))) + return spaces.Discrete(pow(self.numChannels, len(self._create_interface_list()))) def get_observation(self): channels = self.get_channels() - observation = list(map(lambda x: (x['channel number']-1) / numChannels, channels)) + observation = list(map(lambda x: 
(x['channel number']-1) / self.numChannels, channels)) return observation # game over if there is a new interface diff --git a/openAI_RRM/config_master.yaml b/openAI_RRM/config_master.yaml index 119b8f7..4fce51f 100644 --- a/openAI_RRM/config_master.yaml +++ b/openAI_RRM/config_master.yaml @@ -17,7 +17,6 @@ control_applications: class_name : UniflexChannelController openAIGymController: True kwargs : { - 'openAI_controller': "openAI_gym.rrm_agent", 'numChannels' : 2 } diff --git a/openAI_RRM/config_slave.yaml b/openAI_RRM/config_slave.yaml index 7c07ba5..8827d13 100644 --- a/openAI_RRM/config_slave.yaml +++ b/openAI_RRM/config_slave.yaml @@ -22,7 +22,7 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02"], - 'neighbors' : ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], + 'neighbors' : ['aa:aa:aa:aa:aa:02'], 'myMAC' : 'aa:aa:aa:aa:aa:01', - 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54e6, 'txBytesRandom':0.2}} + 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} diff --git a/openAI_RRM/config_slave2.yaml b/openAI_RRM/config_slave2.yaml index 7a3f9ce..9073205 100644 --- a/openAI_RRM/config_slave2.yaml +++ b/openAI_RRM/config_slave2.yaml @@ -24,5 +24,5 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12"], 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], 'myMAC' : 'aa:aa:aa:aa:aa:02', - 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54e6, 'txBytesRandom':0.2}} + 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} diff --git a/openAI_RRM/config_slave3.yaml b/openAI_RRM/config_slave3.yaml new file mode 100644 index 0000000..43b0cce --- /dev/null +++ b/openAI_RRM/config_slave3.yaml @@ -0,0 +1,28 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + +control_applications: + myFilter: + file 
: my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22"], + 'neighbors' : ['aa:aa:aa:aa:aa:02'], + 'myMAC' : 'aa:aa:aa:aa:aa:03', + 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} + diff --git a/openAI_RRM/readme.txt b/openAI_RRM/readme.txt index a99904a..5a22888 100644 --- a/openAI_RRM/readme.txt +++ b/openAI_RRM/readme.txt @@ -10,5 +10,6 @@ python3 rrm_agent.py --config ./config_master.yaml # 2c. Run modules in slave node: uniflex-agent --config ./config_slave.yaml uniflex-agent --config ./config_slave2.yaml +uniflex-agent --config ./config_slave3.yaml # For debugging mode run with -v option diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index 3851ec8..81ae1e5 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -28,7 +28,8 @@ epsilon = 1.0 # exploration rate epsilon_min = 0.01 -epsilon_decay = 0.99 +#epsilon_decay = 0.99 +epsilon_decay = 0.995 time_history = [] rew_history = [] @@ -38,6 +39,7 @@ while True: run = 0 + runs = [] rewards = [] actions = [] @@ -121,10 +123,14 @@ print ("next step") plt.subplot(211) - plt.plot(rewards) + plt.plot(run, reward, 'bo') # Additional point + plt.ylabel('reward') plt.subplot(212) - for ap in range(0, aps): - plt.plot(actions[ap]) + #for ap in range(0, aps): + # plt.plot(actions[ap]) + plt.plot(run, action, 'bo') # Additional point + plt.ylabel('action') + plt.xlabel('step') plt.pause(0.05) run += 1 From e13e4f3f3f9dfac37f83638e80eae9679033f788 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Mon, 8 Apr 2019 13:17:05 +0200 Subject: [PATCH 17/54] addthomsonalgorithm --- openAI_RRM/Setting2_2/common.py | 
28 ++++ openAI_RRM/Setting2_2/my_filter.py | 53 ++++++++ openAI_RRM/Setting3_112/common.py | 28 ++++ openAI_RRM/Setting3_112/my_filter.py | 53 ++++++++ openAI_RRM/Setting3_222/common.py | 28 ++++ openAI_RRM/Setting3_222/my_filter.py | 53 ++++++++ openAI_RRM/config_slave.yaml | 28 ---- openAI_RRM/config_slave2.yaml | 28 ---- openAI_RRM/config_slave3.yaml | 28 ---- openAI_RRM/readme.txt | 6 +- openAI_RRM/thomson_agent.py | 184 +++++++++++++++++++++++++++ 11 files changed, 430 insertions(+), 87 deletions(-) create mode 100755 openAI_RRM/Setting2_2/common.py create mode 100755 openAI_RRM/Setting2_2/my_filter.py create mode 100755 openAI_RRM/Setting3_112/common.py create mode 100755 openAI_RRM/Setting3_112/my_filter.py create mode 100755 openAI_RRM/Setting3_222/common.py create mode 100755 openAI_RRM/Setting3_222/my_filter.py delete mode 100644 openAI_RRM/config_slave.yaml delete mode 100644 openAI_RRM/config_slave2.yaml delete mode 100644 openAI_RRM/config_slave3.yaml create mode 100644 openAI_RRM/thomson_agent.py diff --git a/openAI_RRM/Setting2_2/common.py b/openAI_RRM/Setting2_2/common.py new file mode 100755 index 0000000..370d25f --- /dev/null +++ b/openAI_RRM/Setting2_2/common.py @@ -0,0 +1,28 @@ +from uniflex.core import events + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class AveragedSpectrumScanSampleEvent(events.EventBase): + def __init__(self, avg): + super().__init__() + self.avg = avg + + +class StartMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class StopMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class ChangeWindowSizeEvent(events.EventBase): + def __init__(self, value): + super().__init__() + self.window = value diff --git a/openAI_RRM/Setting2_2/my_filter.py b/openAI_RRM/Setting2_2/my_filter.py new file mode 100755 index 0000000..59d59a2 --- /dev/null 
+++ b/openAI_RRM/Setting2_2/my_filter.py @@ -0,0 +1,53 @@ +import logging +from uniflex.core import modules +from sbi.radio_device.events import SpectralScanSampleEvent +from common import AveragedSpectrumScanSampleEvent +from common import ChangeWindowSizeEvent + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class MyAvgFilter(modules.ControlApplication): + def __init__(self, window): + super(MyAvgFilter, self).__init__() + self.log = logging.getLogger('MyFilter') + self.window = window + self.samples = [] + + @modules.on_event(ChangeWindowSizeEvent) + def change_window_size(self, event): + self.log.info("New window size: {}".format(event.window)) + self.window = event.window + + def change_window_size_func(self, newWindow): + self.log.info("New window size: {}".format(newWindow)) + self.window = newWindow + + def get_window_size(self): + return self.window + + @modules.on_event(SpectralScanSampleEvent) + def serve_spectral_scan_sample(self, event): + sample = event.sample + node = event.node + device = event.device + self.log.debug("New SpectralScan Sample:{} from node {}, device: {}" + .format(sample, node, device)) + + self.samples.append(sample) + + if len(self.samples) == self.window: + s = sum(self.samples) + self.samples.pop(0) + avg = s / self.window + self.log.debug("Calculated average: {}".format(avg)) + event = AveragedSpectrumScanSampleEvent(avg) + self.send_event(event) + + def add_two(self, value): + value1 = value + 2 + value2 = value * 2 + return [value1, value2] diff --git a/openAI_RRM/Setting3_112/common.py b/openAI_RRM/Setting3_112/common.py new file mode 100755 index 0000000..370d25f --- /dev/null +++ b/openAI_RRM/Setting3_112/common.py @@ -0,0 +1,28 @@ +from uniflex.core import events + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ 
= "{gawlowicz}@tkn.tu-berlin.de" + + +class AveragedSpectrumScanSampleEvent(events.EventBase): + def __init__(self, avg): + super().__init__() + self.avg = avg + + +class StartMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class StopMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class ChangeWindowSizeEvent(events.EventBase): + def __init__(self, value): + super().__init__() + self.window = value diff --git a/openAI_RRM/Setting3_112/my_filter.py b/openAI_RRM/Setting3_112/my_filter.py new file mode 100755 index 0000000..59d59a2 --- /dev/null +++ b/openAI_RRM/Setting3_112/my_filter.py @@ -0,0 +1,53 @@ +import logging +from uniflex.core import modules +from sbi.radio_device.events import SpectralScanSampleEvent +from common import AveragedSpectrumScanSampleEvent +from common import ChangeWindowSizeEvent + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class MyAvgFilter(modules.ControlApplication): + def __init__(self, window): + super(MyAvgFilter, self).__init__() + self.log = logging.getLogger('MyFilter') + self.window = window + self.samples = [] + + @modules.on_event(ChangeWindowSizeEvent) + def change_window_size(self, event): + self.log.info("New window size: {}".format(event.window)) + self.window = event.window + + def change_window_size_func(self, newWindow): + self.log.info("New window size: {}".format(newWindow)) + self.window = newWindow + + def get_window_size(self): + return self.window + + @modules.on_event(SpectralScanSampleEvent) + def serve_spectral_scan_sample(self, event): + sample = event.sample + node = event.node + device = event.device + self.log.debug("New SpectralScan Sample:{} from node {}, device: {}" + .format(sample, node, device)) + + self.samples.append(sample) + + if len(self.samples) == self.window: + s = sum(self.samples) + self.samples.pop(0) + 
avg = s / self.window + self.log.debug("Calculated average: {}".format(avg)) + event = AveragedSpectrumScanSampleEvent(avg) + self.send_event(event) + + def add_two(self, value): + value1 = value + 2 + value2 = value * 2 + return [value1, value2] diff --git a/openAI_RRM/Setting3_222/common.py b/openAI_RRM/Setting3_222/common.py new file mode 100755 index 0000000..370d25f --- /dev/null +++ b/openAI_RRM/Setting3_222/common.py @@ -0,0 +1,28 @@ +from uniflex.core import events + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class AveragedSpectrumScanSampleEvent(events.EventBase): + def __init__(self, avg): + super().__init__() + self.avg = avg + + +class StartMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class StopMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class ChangeWindowSizeEvent(events.EventBase): + def __init__(self, value): + super().__init__() + self.window = value diff --git a/openAI_RRM/Setting3_222/my_filter.py b/openAI_RRM/Setting3_222/my_filter.py new file mode 100755 index 0000000..59d59a2 --- /dev/null +++ b/openAI_RRM/Setting3_222/my_filter.py @@ -0,0 +1,53 @@ +import logging +from uniflex.core import modules +from sbi.radio_device.events import SpectralScanSampleEvent +from common import AveragedSpectrumScanSampleEvent +from common import ChangeWindowSizeEvent + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class MyAvgFilter(modules.ControlApplication): + def __init__(self, window): + super(MyAvgFilter, self).__init__() + self.log = logging.getLogger('MyFilter') + self.window = window + self.samples = [] + + @modules.on_event(ChangeWindowSizeEvent) + def change_window_size(self, event): + self.log.info("New window size: 
{}".format(event.window)) + self.window = event.window + + def change_window_size_func(self, newWindow): + self.log.info("New window size: {}".format(newWindow)) + self.window = newWindow + + def get_window_size(self): + return self.window + + @modules.on_event(SpectralScanSampleEvent) + def serve_spectral_scan_sample(self, event): + sample = event.sample + node = event.node + device = event.device + self.log.debug("New SpectralScan Sample:{} from node {}, device: {}" + .format(sample, node, device)) + + self.samples.append(sample) + + if len(self.samples) == self.window: + s = sum(self.samples) + self.samples.pop(0) + avg = s / self.window + self.log.debug("Calculated average: {}".format(avg)) + event = AveragedSpectrumScanSampleEvent(avg) + self.send_event(event) + + def add_two(self, value): + value1 = value + 2 + value2 = value * 2 + return [value1, value2] diff --git a/openAI_RRM/config_slave.yaml b/openAI_RRM/config_slave.yaml deleted file mode 100644 index 8827d13..0000000 --- a/openAI_RRM/config_slave.yaml +++ /dev/null @@ -1,28 +0,0 @@ -## UniFlex Agent config file - -config: - name: 'HC node' - info: 'filter runs on local node' - iface: 'lo' - -control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - - discovery: - module : uniflex_app_discovery_pyre - class_name : PyreDiscoverySlaveModule - kwargs: {"iface":"lo", "groupName":"uniflex_1234"} - -modules: - simple: - module : uniflex_module_simple - class_name : SimpleModule4 - devices : ['phy0'] - kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02"], - 'neighbors' : ['aa:aa:aa:aa:aa:02'], - 'myMAC' : 'aa:aa:aa:aa:aa:01', - 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} - diff --git a/openAI_RRM/config_slave2.yaml b/openAI_RRM/config_slave2.yaml deleted file mode 100644 index 9073205..0000000 --- a/openAI_RRM/config_slave2.yaml +++ /dev/null @@ -1,28 +0,0 @@ -## UniFlex Agent config file - 
-config: - name: 'HC node' - info: 'filter runs on local node' - iface: 'lo' - -control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - - discovery: - module : uniflex_app_discovery_pyre - class_name : PyreDiscoverySlaveModule - kwargs: {"iface":"lo", "groupName":"uniflex_1234"} - -modules: - simple: - module : uniflex_module_simple - class_name : SimpleModule4 - devices : ['phy0'] - kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12"], - 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], - 'myMAC' : 'aa:aa:aa:aa:aa:02', - 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} - diff --git a/openAI_RRM/config_slave3.yaml b/openAI_RRM/config_slave3.yaml deleted file mode 100644 index 43b0cce..0000000 --- a/openAI_RRM/config_slave3.yaml +++ /dev/null @@ -1,28 +0,0 @@ -## UniFlex Agent config file - -config: - name: 'HC node' - info: 'filter runs on local node' - iface: 'lo' - -control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - - discovery: - module : uniflex_app_discovery_pyre - class_name : PyreDiscoverySlaveModule - kwargs: {"iface":"lo", "groupName":"uniflex_1234"} - -modules: - simple: - module : uniflex_module_simple - class_name : SimpleModule4 - devices : ['phy0'] - kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22"], - 'neighbors' : ['aa:aa:aa:aa:aa:02'], - 'myMAC' : 'aa:aa:aa:aa:aa:03', - 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} - diff --git a/openAI_RRM/readme.txt b/openAI_RRM/readme.txt index 5a22888..83ab8ba 100644 --- a/openAI_RRM/readme.txt +++ b/openAI_RRM/readme.txt @@ -8,8 +8,8 @@ uniflex-broker # 2b. Run control program in master node: python3 rrm_agent.py --config ./config_master.yaml # 2c. 
Run modules in slave node: -uniflex-agent --config ./config_slave.yaml -uniflex-agent --config ./config_slave2.yaml -uniflex-agent --config ./config_slave3.yaml +uniflex-agent --config ./Setting2_2/config_slave.yaml +uniflex-agent --config ./Setting2_2/config_slave2.yaml +uniflex-agent --config ./Setting2_2/config_slave3.yaml # For debugging mode run with -v option diff --git a/openAI_RRM/thomson_agent.py b/openAI_RRM/thomson_agent.py new file mode 100644 index 0000000..903fbc7 --- /dev/null +++ b/openAI_RRM/thomson_agent.py @@ -0,0 +1,184 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import gym +import UniFlexGym +import tensorflow as tf +import tensorflow.contrib.slim as slim +import numpy as np +#from tensorflow import keras +import argparse +import logging +import time +import matplotlib.pyplot as plt +from math import * + + +parser = argparse.ArgumentParser(description='Uniflex reader') +parser.add_argument('--config', help='path to the uniflex config file', default=None) +args = parser.parse_args() +if not args.config: + print("No config file specified!") + quit() + +#create uniflex environment, steptime is 10sec +env = gym.make('uniflex-v0') +#env.configure() +env.start_controller(steptime=1, config=args.config) + +epsilon = 1.0 # exploration rate +epsilon_min = 0.01 +#epsilon_decay = 0.99 +epsilon_decay = 0.995 + +time_history = [] +rew_history = [] + +numChannels = 2 +episode = 0 + +while True: + run = 0 + runs = [] + rewards = [] + actions = [] + + state = env.reset() + n = 0 + ac_space = env.action_space + ob_space = env.observation_space + print("Observation space: ", ob_space, ob_space.dtype) + print("Action space: ", ac_space, ac_space.n) + + a_size = int(ac_space.n) + + avg = [] + num = [] + maxreward = 1 + + done = False + + if a_size == 0: + print("there is no vaild AP - sleep 10 seconds") + time.sleep(2) + continue + + aps = int(log(a_size, numChannels)) + + for i in range(a_size): + avg.append(0) + num.append(0) + + while not done: + # 
generate random values + randval = [] + for i in range(a_size): + randval.append(np.random.normal(avg[i]/maxreward, 1/(num[i] + 1), 1)) + + #take index of highest value + action = np.argmax(randval) + + #execute step + next_state, reward, done, _ = env.step(action) + + # add reward for further execution + avg[action] = (avg[action] * num[action] + reward) / (num[action] + 2) + num[action] += 1 + + maxreward = np.maximum(maxreward, reward) + + # statistics + rewards.append(reward) + + for ap in range(0, aps): + ifaceaction = int(action / (pow(numChannels, ap))) + ifaceaction = ifaceaction % numChannels + #actions[ap].append(ifaceaction) + + print ("Reward: " + str(reward)) + print ("GameOver: " + str(done)) + print ("Next Channels: " + str(next_state)) + print ("Channel selection:" + str(action)) + print ("Average:" + str(avg)) + print ("next step") + + plt.subplot(211) + plt.plot(run, reward, 'bo') # Additional point + plt.ylabel('reward') + plt.subplot(212) + #for ap in range(0, aps): + # plt.plot(actions[ap]) + plt.plot(run, action, 'bo') # Additional point + plt.ylabel('action') + plt.xlabel('step') + plt.pause(0.05) + + run += 1 + + episode += 1 + + +''' +ob_space = env.observation_space +ac_space = env.action_space +print("Observation space: ", ob_space, ob_space.dtype) +print("Action space: ", ac_space, ac_space.n) + +s_size = ob_space.shape[0] +a_size = ac_space.n +model = keras.Sequential() +model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) +model.add(keras.layers.Dense(a_size, activation='softmax')) +model.compile(optimizer=tf.train.AdamOptimizer(0.001), + loss='categorical_crossentropy', + metrics=['accuracy']) + +total_episodes = 200 +max_env_steps = 100 +env._max_episode_steps = max_env_steps + +epsilon = 1.0 # exploration rate +epsilon_min = 0.01 +epsilon_decay = 0.999 + +time_history = [] +rew_history = [] + +for e in range(total_episodes): + + state = env.reset() + state = np.reshape(state, [1, s_size]) + rewardsum = 0 + 
for time in range(max_env_steps): + # Choose action + if np.random.rand(1) < epsilon: + action = np.random.randint(a_size) + else: + action = np.argmax(model.predict(state)[0]) + + # Step + next_state, reward, done, _ = env.step(action) + + if done: + print("episode: {}/{}, time: {}, rew: {}, eps: {:.2}" + .format(e, total_episodes, time, rewardsum, epsilon)) + break + + next_state = np.reshape(next_state, [1, s_size]) + + # Train + target = reward + if not done: + target = (reward + 0.95 * np.amax(model.predict(next_state)[0])) + + target_f = model.predict(state) + target_f[0][action] = target + model.fit(state, target_f, epochs=1, verbose=0) + + state = next_state + rewardsum += reward + if epsilon > epsilon_min: epsilon *= epsilon_decay + + time_history.append(time) + rew_history.append(rewardsum) +''' From 7400d61932a3941a3221427f613de691b4e44291 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Mon, 15 Apr 2019 19:07:04 +0200 Subject: [PATCH 18/54] add channel list and csv export --- openAI_RRM/channel_controller.py | 28 ++++++++++++++++++---------- openAI_RRM/config_master.yaml | 2 +- openAI_RRM/rrm_agent.py | 11 +++++++++++ 3 files changed, 30 insertions(+), 11 deletions(-) diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index dcff179..a3efe85 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -2,6 +2,7 @@ import datetime import random import numpy +import sys from math import * from functools import reduce @@ -31,6 +32,7 @@ class UniflexChannelController(modules.ControlApplication, UniFlexController): def __init__(self,**kwargs): super(UniflexChannelController, self).__init__() self.log = logging.getLogger('ChannelController') + logging.basicConfig(stream=sys.stdout, level=logging.INFO) self.running = False self.timeInterval = 10 @@ -39,14 +41,14 @@ def __init__(self,**kwargs): self.packetLossEventsEnabled = False self.channel = 1 - self.numChannels = 2 + 
self.availableChannels = [] self.observationSpace = [] self.registeredClients = self._create_client_list() self.lastObservation = [] self.actionSet = [] - if 'numChannels' in kwargs: - self.numChannels = kwargs['numChannels'] + if 'availableChannels' in kwargs: + self.availableChannels = kwargs['availableChannels'] # if not "openAI_controller" in kwargs: # raise ValueError("There is no OpenAI gym controller specified. Can not #find \"" + "openAI_controller" + "\" as kwargs in the config file.") @@ -386,9 +388,10 @@ def reset(self): def execute_action(self, action): for index, interface in enumerate(self._create_interface_list()): - ifaceaction = int(action / (pow(self.numChannels,index))) - ifaceaction = ifaceaction % self.numChannels - self.set_channel(interface['node'], interface['device'], interface['iface'], ifaceaction*self.numChannels+1, None) + ifaceaction = int(action / (pow(len(self.availableChannels),index))) + ifaceaction = ifaceaction % len(self.availableChannels) + self.set_channel(interface['node'], interface['device'], interface['iface'], + self.availableChannels[ifaceaction], None) #try: # for index, actionStep in enumerate(action): # interface = self.actionSpace[index] @@ -402,19 +405,24 @@ def render(): return def get_observationSpace(self): - maxValues = [self.numChannels for i in self._create_interface_list()] + maxValues = [len(self.availableChannels) for i in self._create_interface_list()] #return spaces.Box(low=0, high=numChannels, shape=(len(self._create_interface_list()),0), dtype=numpy.float32) return spaces.MultiDiscrete(maxValues) #spaces.Box(low=0, high=10000000, shape=(len(self.observationSpace),), dtype=numpy.float32) def get_actionSpace(self): - if len(self._create_interface_list()) == 0: + interfaceList = self._create_interface_list(); + if(len(interfaceList) > 0): + self.log.info("UUIDs of the action space") + for key, interface in enumerate(interfaceList): + self.log.info(str(key) + ":" + interface['device']) + if 
len(interfaceList) == 0: return spaces.Discrete(0) - return spaces.Discrete(pow(self.numChannels, len(self._create_interface_list()))) + return spaces.Discrete(pow(len(self.availableChannels), len(interfaceList))) def get_observation(self): channels = self.get_channels() - observation = list(map(lambda x: (x['channel number']-1) / self.numChannels, channels)) + observation = list(map(lambda x: x['channel number'], channels)) return observation # game over if there is a new interface diff --git a/openAI_RRM/config_master.yaml b/openAI_RRM/config_master.yaml index 4fce51f..f95d888 100644 --- a/openAI_RRM/config_master.yaml +++ b/openAI_RRM/config_master.yaml @@ -17,7 +17,7 @@ control_applications: class_name : UniflexChannelController openAIGymController: True kwargs : { - 'numChannels' : 2 + 'availableChannels' : [1,5] } modules: diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index 81ae1e5..969710c 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -10,16 +10,20 @@ import argparse import logging import time +import csv import matplotlib.pyplot as plt from math import * parser = argparse.ArgumentParser(description='Uniflex reader') parser.add_argument('--config', help='path to the uniflex config file', default=None) +parser.add_argument('--output', help='path to a csv file for agent output data', default=None) args = parser.parse_args() if not args.config: print("No config file specified!") quit() +if not args.output: + print("No output file specified! 
- Skip data") #create uniflex environment, steptime is 10sec env = gym.make('uniflex-v0') @@ -111,6 +115,13 @@ rewards.append(reward) + + if args.output: + with open(args.output, 'a') as csvFile: + writer = csv.writer(csvFile) + writer.writerow([reward, action]) + csvFile.close() + for ap in range(0, aps): ifaceaction = int(action / (pow(numChannels, ap))) ifaceaction = ifaceaction % numChannels From 2a884feaaa343fe21cde45779b999e115f40403a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Tue, 16 Apr 2019 18:45:33 +0200 Subject: [PATCH 19/54] split for simulation --- .../Setting2_2/common.py | 0 .../Setting2_2/config_slave.yaml | 0 .../Setting2_2/config_slave2.yaml | 0 .../Setting2_2/my_filter.py | 0 .../Setting2_2/readme.txt | 0 .../Setting3_112/common.py | 0 .../Setting3_112/config_slave.yaml | 0 .../Setting3_112/config_slave2.yaml | 0 .../Setting3_112/config_slave3.yaml | 0 .../Setting3_112/my_filter.py | 0 .../Setting3_112/readme.txt | 0 .../Setting3_222/common.py | 0 .../Setting3_222/config_slave.yaml | 0 .../Setting3_222/config_slave2.yaml | 0 .../Setting3_222/config_slave3.yaml | 0 .../Setting3_222/my_filter.py | 0 .../Setting3_222/readme.txt | 0 openAI_RRM/channel_controller.py | 10 ++++-- openAI_RRM/config_master_simulation.yaml | 32 +++++++++++++++++++ openAI_RRM/config_slave.yaml | 25 +++++++++++++++ openAI_RRM/readme.txt | 14 ++++---- 21 files changed, 73 insertions(+), 8 deletions(-) rename openAI_RRM/{ => SimulationSlavesConfig}/Setting2_2/common.py (100%) rename openAI_RRM/{ => SimulationSlavesConfig}/Setting2_2/config_slave.yaml (100%) rename openAI_RRM/{ => SimulationSlavesConfig}/Setting2_2/config_slave2.yaml (100%) rename openAI_RRM/{ => SimulationSlavesConfig}/Setting2_2/my_filter.py (100%) rename openAI_RRM/{ => SimulationSlavesConfig}/Setting2_2/readme.txt (100%) rename openAI_RRM/{ => SimulationSlavesConfig}/Setting3_112/common.py (100%) rename openAI_RRM/{ => SimulationSlavesConfig}/Setting3_112/config_slave.yaml (100%) 
rename openAI_RRM/{ => SimulationSlavesConfig}/Setting3_112/config_slave2.yaml (100%) rename openAI_RRM/{ => SimulationSlavesConfig}/Setting3_112/config_slave3.yaml (100%) rename openAI_RRM/{ => SimulationSlavesConfig}/Setting3_112/my_filter.py (100%) rename openAI_RRM/{ => SimulationSlavesConfig}/Setting3_112/readme.txt (100%) rename openAI_RRM/{ => SimulationSlavesConfig}/Setting3_222/common.py (100%) rename openAI_RRM/{ => SimulationSlavesConfig}/Setting3_222/config_slave.yaml (100%) rename openAI_RRM/{ => SimulationSlavesConfig}/Setting3_222/config_slave2.yaml (100%) rename openAI_RRM/{ => SimulationSlavesConfig}/Setting3_222/config_slave3.yaml (100%) rename openAI_RRM/{ => SimulationSlavesConfig}/Setting3_222/my_filter.py (100%) rename openAI_RRM/{ => SimulationSlavesConfig}/Setting3_222/readme.txt (100%) create mode 100644 openAI_RRM/config_master_simulation.yaml create mode 100644 openAI_RRM/config_slave.yaml diff --git a/openAI_RRM/Setting2_2/common.py b/openAI_RRM/SimulationSlavesConfig/Setting2_2/common.py similarity index 100% rename from openAI_RRM/Setting2_2/common.py rename to openAI_RRM/SimulationSlavesConfig/Setting2_2/common.py diff --git a/openAI_RRM/Setting2_2/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml similarity index 100% rename from openAI_RRM/Setting2_2/config_slave.yaml rename to openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml diff --git a/openAI_RRM/Setting2_2/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml similarity index 100% rename from openAI_RRM/Setting2_2/config_slave2.yaml rename to openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml diff --git a/openAI_RRM/Setting2_2/my_filter.py b/openAI_RRM/SimulationSlavesConfig/Setting2_2/my_filter.py similarity index 100% rename from openAI_RRM/Setting2_2/my_filter.py rename to openAI_RRM/SimulationSlavesConfig/Setting2_2/my_filter.py diff --git a/openAI_RRM/Setting2_2/readme.txt 
b/openAI_RRM/SimulationSlavesConfig/Setting2_2/readme.txt similarity index 100% rename from openAI_RRM/Setting2_2/readme.txt rename to openAI_RRM/SimulationSlavesConfig/Setting2_2/readme.txt diff --git a/openAI_RRM/Setting3_112/common.py b/openAI_RRM/SimulationSlavesConfig/Setting3_112/common.py similarity index 100% rename from openAI_RRM/Setting3_112/common.py rename to openAI_RRM/SimulationSlavesConfig/Setting3_112/common.py diff --git a/openAI_RRM/Setting3_112/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml similarity index 100% rename from openAI_RRM/Setting3_112/config_slave.yaml rename to openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml diff --git a/openAI_RRM/Setting3_112/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml similarity index 100% rename from openAI_RRM/Setting3_112/config_slave2.yaml rename to openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml diff --git a/openAI_RRM/Setting3_112/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml similarity index 100% rename from openAI_RRM/Setting3_112/config_slave3.yaml rename to openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml diff --git a/openAI_RRM/Setting3_112/my_filter.py b/openAI_RRM/SimulationSlavesConfig/Setting3_112/my_filter.py similarity index 100% rename from openAI_RRM/Setting3_112/my_filter.py rename to openAI_RRM/SimulationSlavesConfig/Setting3_112/my_filter.py diff --git a/openAI_RRM/Setting3_112/readme.txt b/openAI_RRM/SimulationSlavesConfig/Setting3_112/readme.txt similarity index 100% rename from openAI_RRM/Setting3_112/readme.txt rename to openAI_RRM/SimulationSlavesConfig/Setting3_112/readme.txt diff --git a/openAI_RRM/Setting3_222/common.py b/openAI_RRM/SimulationSlavesConfig/Setting3_222/common.py similarity index 100% rename from openAI_RRM/Setting3_222/common.py rename to openAI_RRM/SimulationSlavesConfig/Setting3_222/common.py 
diff --git a/openAI_RRM/Setting3_222/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml similarity index 100% rename from openAI_RRM/Setting3_222/config_slave.yaml rename to openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml diff --git a/openAI_RRM/Setting3_222/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml similarity index 100% rename from openAI_RRM/Setting3_222/config_slave2.yaml rename to openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml diff --git a/openAI_RRM/Setting3_222/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml similarity index 100% rename from openAI_RRM/Setting3_222/config_slave3.yaml rename to openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml diff --git a/openAI_RRM/Setting3_222/my_filter.py b/openAI_RRM/SimulationSlavesConfig/Setting3_222/my_filter.py similarity index 100% rename from openAI_RRM/Setting3_222/my_filter.py rename to openAI_RRM/SimulationSlavesConfig/Setting3_222/my_filter.py diff --git a/openAI_RRM/Setting3_222/readme.txt b/openAI_RRM/SimulationSlavesConfig/Setting3_222/readme.txt similarity index 100% rename from openAI_RRM/Setting3_222/readme.txt rename to openAI_RRM/SimulationSlavesConfig/Setting3_222/readme.txt diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index a3efe85..e4aef2d 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -46,10 +46,14 @@ def __init__(self,**kwargs): self.registeredClients = self._create_client_list() self.lastObservation = [] self.actionSet = [] + self.simulation = False if 'availableChannels' in kwargs: self.availableChannels = kwargs['availableChannels'] + if 'simulation' in kwargs: + self.simulation = kwargs['simulation'] + # if not "openAI_controller" in kwargs: # raise ValueError("There is no OpenAI gym controller specified. 
Can not #find \"" + "openAI_controller" + "\" as kwargs in the config file.") # else: @@ -382,7 +386,8 @@ def reset(self): if channel > 12: channel = 1 # clear bandwidth counter - self.simulate_flows() + if(self.simulation): + self.simulate_flows() self.get_bandwidth() return @@ -434,7 +439,8 @@ def get_gameOver(self): def get_reward(self): # for simulation - self.simulate_flows() + if(self.simulation): + self.simulate_flows() bandwidthList = self.get_bandwidth() #bandwidth = sorted(bandwidth, key=lambda k: k['mac']) diff --git a/openAI_RRM/config_master_simulation.yaml b/openAI_RRM/config_master_simulation.yaml new file mode 100644 index 0000000..fa2339f --- /dev/null +++ b/openAI_RRM/config_master_simulation.yaml @@ -0,0 +1,32 @@ +## UniFlex Agent config file + +config: + name: "Global_Controller" + info: 'agent hosts global controller' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xpub: "tcp://127.0.0.1:8990" + xsub: "tcp://127.0.0.1:8989" + +control_applications: + myController: + file : channel_controller.py + class_name : UniflexChannelController + openAIGymController: True + kwargs : { + 'availableChannels' : [1,5], + 'simulation': True + } + +modules: + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoveryMasterModule + kwargs: {"iface":"lo", + "groupName":"uniflex_1234", + "sub":"tcp://127.0.0.1:8990", + "pub":"tcp://127.0.0.1:8989" + } diff --git a/openAI_RRM/config_slave.yaml b/openAI_RRM/config_slave.yaml new file mode 100644 index 0000000..247e6eb --- /dev/null +++ b/openAI_RRM/config_slave.yaml @@ -0,0 +1,25 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + 
simple: + module : uniflex_module_wifi + class_name : WifiModule + devices : ['phy0'] + kwargs : {} + diff --git a/openAI_RRM/readme.txt b/openAI_RRM/readme.txt index 83ab8ba..ce749c4 100644 --- a/openAI_RRM/readme.txt +++ b/openAI_RRM/readme.txt @@ -1,15 +1,17 @@ -# 1. Run control program and all modules on local node -uniflex-agent --config ./config_local.yaml - +# Start environment if Uniflex is installed in some source ~/Uniflex/dev/bin/activate # 2a. Run control program in master node: uniflex-broker # 2b. Run control program in master node: python3 rrm_agent.py --config ./config_master.yaml +python3 rrm_agent.py --config ./config_master_simulation.yaml # 2c. Run modules in slave node: -uniflex-agent --config ./Setting2_2/config_slave.yaml -uniflex-agent --config ./Setting2_2/config_slave2.yaml -uniflex-agent --config ./Setting2_2/config_slave3.yaml +#Linux WiFi AP +uniflex-agent --config ./config_slave.yaml +#Simulation +uniflex-agent --config ./SimulationSlavesConfig/Setting2_2/config_slave.yaml +uniflex-agent --config ./SimulationSlavesConfig/Setting2_2/config_slave2.yaml +uniflex-agent --config ./SimulationSlavesConfig/Setting2_2/config_slave3.yaml # For debugging mode run with -v option From d6e652761a3ac63fd1253d8c1e6eb59792c70b2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Fri, 19 Apr 2019 14:05:53 +0200 Subject: [PATCH 20/54] exprot csv data --- openAI_RRM/thomson_agent.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/openAI_RRM/thomson_agent.py b/openAI_RRM/thomson_agent.py index 903fbc7..211fe7e 100644 --- a/openAI_RRM/thomson_agent.py +++ b/openAI_RRM/thomson_agent.py @@ -10,16 +10,20 @@ import argparse import logging import time +import csv import matplotlib.pyplot as plt from math import * parser = argparse.ArgumentParser(description='Uniflex reader') parser.add_argument('--config', help='path to the uniflex config file', default=None) +parser.add_argument('--output', help='path to a csv file for agent 
output data', default=None) args = parser.parse_args() if not args.config: print("No config file specified!") quit() +if not args.output: + print("No output file specified! - Skip data") #create uniflex environment, steptime is 10sec env = gym.make('uniflex-v0') @@ -90,6 +94,12 @@ # statistics rewards.append(reward) + if args.output: + with open(args.output, 'a') as csvFile: + writer = csv.writer(csvFile) + writer.writerow([reward, action]) + csvFile.close() + for ap in range(0, aps): ifaceaction = int(action / (pow(numChannels, ap))) ifaceaction = ifaceaction % numChannels From dfadb3cf69ecc5c018b74ecea2f3519af4cd761d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Sat, 20 Apr 2019 18:47:34 +0200 Subject: [PATCH 21/54] config for dist file --- openAI_RRM/config_slave.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/openAI_RRM/config_slave.yaml b/openAI_RRM/config_slave.yaml index 247e6eb..70c74f1 100644 --- a/openAI_RRM/config_slave.yaml +++ b/openAI_RRM/config_slave.yaml @@ -4,6 +4,13 @@ config: name: 'HC node' info: 'filter runs on local node' iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xpub: "tcp://127.0.0.1:8990" + xsub: "tcp://127.0.0.1:8989" control_applications: myFilter: From 68af1d9c2db755f6dffc5b897c7265021bee8dd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Thu, 25 Apr 2019 17:24:48 +0200 Subject: [PATCH 22/54] broker is nuc4 --- openAI_RRM/config_slave.yaml | 8 +- openAI_RRM/data_20190419/readme | 37 + openAI_RRM/data_20190419/rrmagent_22_1.csv | 338 +++++++++ openAI_RRM/data_20190419/rrmagent_3112.csv | 641 +++++++++++++++++ openAI_RRM/data_20190419/rrmagent_3112_3.csv | 649 ++++++++++++++++++ openAI_RRM/data_20190419/rrmagent_3222_1.csv | 410 +++++++++++ openAI_RRM/data_20190419/rrmagent_3222_2.csv | 361 ++++++++++ openAI_RRM/data_20190419/thomagent_3112.csv | 630 +++++++++++++++++ .../data_20190419/thomsonagent_22_1.csv | 357 ++++++++++ 
.../data_20190419/thomsonagent_3222.csv | 376 ++++++++++ 10 files changed, 3803 insertions(+), 4 deletions(-) create mode 100644 openAI_RRM/data_20190419/readme create mode 100644 openAI_RRM/data_20190419/rrmagent_22_1.csv create mode 100644 openAI_RRM/data_20190419/rrmagent_3112.csv create mode 100644 openAI_RRM/data_20190419/rrmagent_3112_3.csv create mode 100644 openAI_RRM/data_20190419/rrmagent_3222_1.csv create mode 100644 openAI_RRM/data_20190419/rrmagent_3222_2.csv create mode 100644 openAI_RRM/data_20190419/thomagent_3112.csv create mode 100644 openAI_RRM/data_20190419/thomsonagent_22_1.csv create mode 100644 openAI_RRM/data_20190419/thomsonagent_3222.csv diff --git a/openAI_RRM/config_slave.yaml b/openAI_RRM/config_slave.yaml index 70c74f1..f005370 100644 --- a/openAI_RRM/config_slave.yaml +++ b/openAI_RRM/config_slave.yaml @@ -5,12 +5,12 @@ config: info: 'filter runs on local node' iface: 'lo' iface: 'lo' - sub: "tcp://127.0.0.1:8990" - pub: "tcp://127.0.0.1:8989" + sub: "tcp://192.168.10.157:8990" + pub: "tcp://192.168.10.157:8989" broker: - xpub: "tcp://127.0.0.1:8990" - xsub: "tcp://127.0.0.1:8989" + xpub: "tcp://192.168.10.157:8990" + xsub: "tcp://192.168.10.157:8989" control_applications: myFilter: diff --git a/openAI_RRM/data_20190419/readme b/openAI_RRM/data_20190419/readme new file mode 100644 index 0000000..289a7a4 --- /dev/null +++ b/openAI_RRM/data_20190419/readme @@ -0,0 +1,37 @@ +Experiment 1.Stelle 2.Stelle 3.Stelle Erfolg +rrm_22_1 AP2 AP1 Ja +thom_22_1 AP2 AP1 Ja +rrm_3222 AP3 AP1 AP2 +rrm_3222_2 AP3 AP1 AP2 Ja +thom_3222 AP3 AP2 AP1 Ja +rrm_3112 AP3 AP2 AP1 Nein +rrm_3112_2 AP1 AP2 AP3 Nein +thom_3112 AP3 AP2 AP1 Ja +rrm_3112_3 AP2 AP3 AP1 Ja + +Setup 22 +AP1 = 8052dedf-e186-46fa-81c3-6ef20e3f022c +AP2 = 5c1c1270-e2bf-4fd1-a2e3-3080c41dd700 + +Setup 3_222 1. Experiment +AP1 = 89c12af0-0eaa-4b35-95ff-f351d5b576e7 +AP2 = 604c186b-4e00-4e69-a2db-6d23447e9d7c +AP3 = 4268dc71-710d-455f-ac8f-17a827fc9f96 + +Setup 3_222 2. 
Experiment +AP1 = 8a26f145-a736-4784-b311-e09c7438bed2 +AP2 = 52433dc2-b784-45fa-b2a7-2418e9d4d7c0 +AP3 = 92a6d556-bf0b-4bfb-b0a5-6c2fdf758931 + +Setup 3_112 1. Experiment +AP1 = 0bccab73-2c39-407a-b4e7-ee05e772cfec +AP2 = 7529367c-2fa5-4932-a7d0-666735d43d2d +AP3 = 6f47dce9-11a4-4b2c-8b92-12dbf104ac33 + +Setup 3_112 2. Experiment +AP1 = f3667375-4fa6-408d-b56a-783478c92dda +AP2 = 26515431-cc58-45c1-810f-39ac766559f3 +AP3 = a6b812a1-f199-4b2f-a2c5-3e1a1831095c + +Kanäle 1,5 + diff --git a/openAI_RRM/data_20190419/rrmagent_22_1.csv b/openAI_RRM/data_20190419/rrmagent_22_1.csv new file mode 100644 index 0000000..a9c1e6a --- /dev/null +++ b/openAI_RRM/data_20190419/rrmagent_22_1.csv @@ -0,0 +1,338 @@ +6.968643339234087,2 +4.877340252097936,3 +6.894486594503971,2 +4.829809671219739,0 +5.025592454625421,0 +6.80486489797023,1 +6.500577186643617,2 +4.849531907866088,3 +5.0654168120928365,3 +4.928014473826464,3 +4.884206440927315,3 +6.823913201397861,2 +4.827791397754525,3 +4.710968682020747,0 +4.725494201479047,3 +4.6772298648356445,0 +4.971634498050561,0 +6.734628669641348,1 +6.79381191195557,2 +4.916999928356259,3 +6.777327164411419,2 +4.785054535634852,3 +4.668890283605787,0 +6.659677808303656,2 +6.573804989583071,1 +4.840337352734287,3 +6.674679458111923,1 +6.546318109013128,2 +7.0548103277975525,2 +6.648963612882398,1 +4.939050565760155,3 +6.746748765316303,2 +7.126035060886703,2 +4.891647646109325,0 +6.9516003435488125,2 +6.666285651572075,1 +6.632430634910994,2 +6.889853767492763,2 +7.085690357700651,2 +4.699101634384928,3 +6.637868493469754,2 +7.194799179402078,2 +4.729267738351576,0 +4.646291652109486,3 +7.046479016628102,2 +6.382961019959405,1 +6.623067984282345,2 +6.475330639586478,1 +4.847108723132625,0 +6.757747384927922,1 +6.612215024235049,2 +4.887159500981526,0 +6.7681333364099086,1 +6.392783034634185,2 +4.8235575376641915,0 +6.635191843943176,2 +7.061104250860142,2 +4.825520768653243,0 +6.658124978393555,2 +6.873119464410044,2 +4.829193485615847,0 
+6.816956393488488,2 +7.348078859141863,2 +7.190509995978846,2 +6.598472548056792,1 +6.618930307898433,2 +6.555433151026691,1 +6.996693716625735,1 +4.74573244351075,0 +6.969089045677698,2 +6.913747833532728,2 +6.935699065471702,2 +4.886310241395686,3 +5.063963543365806,3 +6.865671251885623,1 +4.851177558792399,3 +6.772657103600182,2 +6.977968406655536,2 +7.080783280679432,2 +7.174101049587835,2 +4.920430285351007,3 +6.878660281342454,2 +4.9223665650227355,0 +6.706509956584688,2 +6.489952360068629,1 +6.414588460610277,2 +7.118026227021735,2 +6.463803113236342,1 +6.6291784186262035,2 +6.899788881249748,2 +7.2864317905635705,2 +4.857210603224528,0 +6.772191109731639,1 +7.138453523981743,1 +6.536643947786893,2 +4.91954758732016,0 +7.048022771804469,2 +7.1481066539469165,2 +4.853762378713745,3 +6.867822483406982,2 +4.854438805676736,0 +4.6990289552568685,3 +6.942845794675865,2 +4.814115266294784,3 +4.648821123685326,0 +6.7816863689134514,2 +7.1827109040931205,2 +6.998684379219938,2 +4.823120195312265,3 +6.870245593470276,2 +7.005422003770458,2 +7.10160366288154,2 +4.743335626562599,3 +6.890518645053367,1 +4.910143805147186,0 +6.737654296082274,2 +6.831435331211273,2 +4.773715740657117,0 +5.030927248125971,0 +6.830290986841219,2 +7.132640642938056,2 +7.012116058149708,2 +7.034015889432277,2 +4.875144612358995,0 +4.989051541358437,0 +6.760353113642177,2 +6.5487112243726076,1 +6.520886288454751,2 +7.070408028675438,2 +6.606899313434736,1 +6.402962347926719,2 +4.702202211294271,0 +6.891731604462052,2 +4.752448447919154,0 +6.864958652102934,2 +6.855078935651558,2 +4.905512048107439,3 +6.783473624652729,2 +7.201016187093753,2 +7.051253054127781,2 +7.0680783915912455,2 +7.004368286139357,2 +7.141818835733709,2 +4.897877672956865,3 +6.661111733005551,2 +4.9928829901804805,0 +6.777076182365991,2 +6.452636089576176,1 +6.625842294192408,2 +7.001313472954859,2 +6.6631872624987185,1 +6.654628248657436,2 +7.009231604900202,2 +7.049048846494447,2 +4.966995864947118,0 
+6.936887654507816,1 +6.592492443618869,2 +7.179452609410091,2 +7.190989514642377,2 +7.082841905052061,2 +7.191275220247753,2 +4.817912399655011,3 +6.83713312008293,2 +4.722326983159793,0 +6.710742039869401,2 +4.743531144429287,0 +6.6378328722729885,2 +6.583251087299078,1 +6.7140441240083755,2 +7.109080841828271,2 +7.258536038831862,2 +4.913052942189178,3 +7.01482149395946,2 +6.919169689189318,2 +7.110053530122957,2 +6.981844464530859,2 +7.059908941702666,2 +4.789776862904222,0 +6.680802123693604,2 +7.051842600816948,2 +4.936685534905964,3 +4.685305792112524,0 +6.915744774907733,2 +7.214228174234699,2 +7.060875154171194,2 +4.806483676969002,0 +4.9715286396636245,0 +6.860747440488777,2 +7.138472303395467,2 +4.835968790285445,0 +6.685903933027552,2 +7.1074048304785595,2 +7.056817810488039,2 +6.420762907490318,1 +7.009962624457808,1 +6.628189707434896,2 +4.830593329626463,3 +4.954873836652197,3 +6.65825643070384,2 +4.792722050542475,0 +6.793214190594365,2 +7.039737791980063,2 +4.744820571819737,0 +4.8205618610947605,3 +4.684542979132895,0 +6.867747233324013,2 +6.878029638148211,2 +6.563539819209222,1 +7.212530057749915,1 +6.77509775604327,2 +7.30031520142915,2 +6.848728274656735,2 +7.056571973972035,2 +6.445156583932425,1 +6.648560685289599,2 +7.0115084102559795,2 +7.185117669485082,2 +6.741130317286606,1 +6.501099436849239,2 +5.030723615305958,0 +5.093808830092948,0 +6.72786551418672,2 +7.016205597664731,2 +4.860231948679554,3 +6.7164589995306185,2 +7.082702091898946,2 +7.162307678918578,2 +7.00180080384133,2 +4.702571251143576,3 +6.783500950309879,2 +7.0984519319460295,2 +6.948196638438487,2 +7.177266530985688,2 +7.10487170553006,2 +6.896338684579194,2 +6.964668263600719,2 +4.838488965787582,3 +6.931169502477494,2 +7.012010591875666,2 +6.9305120390077395,2 +7.01886499209714,2 +6.686300358378705,1 +6.478197250800746,2 +7.108981985394132,2 +4.8704101271040985,3 +6.8589890963990765,2 +4.786408524653583,0 +5.10362913849168,0 +6.89184767895632,2 +7.040603144969317,2 
+4.807630653126563,3 +4.6238175122393885,0 +4.574610403279715,3 +6.838592440835193,2 +7.188905110462382,2 +7.092075428290929,2 +6.965890611010906,2 +6.8410176009886765,2 +6.879959611672154,2 +7.137476307195644,2 +7.191988430997359,2 +6.907463492826523,2 +6.932056661460609,2 +6.942916038460171,2 +6.877741995221439,2 +7.045372205692875,2 +7.0787385444704505,2 +6.756599961962838,2 +7.176309154919773,2 +7.15220093554735,2 +7.235784070661875,2 +6.8821976020033135,2 +7.213369089809258,2 +7.206141927882715,2 +6.889058360718991,2 +7.090830174847597,2 +7.163511595022865,2 +6.978910620547534,2 +6.991615356945119,2 +7.161782227243371,2 +7.155438984308994,2 +6.984049761326135,2 +4.745353438497089,3 +6.793316714332721,2 +7.131776451117646,2 +6.562429813286526,1 +6.438395104821105,2 +7.006833196411003,2 +6.904562201316457,2 +6.917422612028198,2 +7.289732508558295,2 +7.071289841728291,2 +6.9969520532745175,2 +7.037187193073474,2 +4.772903220397086,0 +6.70843209380403,2 +6.899288651070654,2 +7.194909057312767,2 +6.763903960744844,2 +7.16577586627564,2 +7.224529931166564,2 +6.915155959260374,2 +6.645465888444863,1 +6.452513258409276,2 +6.799732876507319,2 +6.860994050302957,2 +7.159079252396944,2 +7.09834688734944,2 +6.968707828566198,2 +7.076907124357614,2 +6.89088874293343,2 +7.030734053034645,2 +6.977380208522623,2 +7.178285056710084,2 +7.048217163193621,2 +7.220657465641554,2 +7.220435049436793,2 +7.133472358246626,2 +7.2212033767085035,2 +7.197598410186831,2 +7.041386395567242,2 +7.049590436975319,2 +7.074619620358661,2 +7.109738928371129,2 +7.142986620071055,2 +7.045726014879667,2 +7.092335092969319,2 +7.038349511691561,2 +7.168301496672241,2 +6.90746412484504,2 +6.946230283683841,2 +7.069570089106983,2 +7.150555964667147,2 +6.9213233969319505,2 +6.872091106506848,2 +7.065164546813956,2 +7.151857625837071,2 +6.854066779931638,2 diff --git a/openAI_RRM/data_20190419/rrmagent_3112.csv b/openAI_RRM/data_20190419/rrmagent_3112.csv new file mode 100644 index 0000000..408f996 --- 
/dev/null +++ b/openAI_RRM/data_20190419/rrmagent_3112.csv @@ -0,0 +1,641 @@ +4.636890087010754,7 +6.7799893316953845,6 +6.459088955705708,4 +6.737466573405607,4 +4.622919332688427,7 +6.454850869677397,5 +6.3757979342637325,3 +4.6067086084799325,0 +4.916798604753089,0 +6.994094441982819,6 +6.636033116666916,3 +6.676290717409006,3 +6.509528433300335,5 +6.764079602315745,1 +6.6329234575558695,2 +6.744756201716382,6 +4.781002888853598,0 +6.615589297246637,3 +6.874998734332087,3 +6.613308947332084,5 +6.358291524087188,4 +6.6388000788720785,4 +6.4916776488646395,2 +4.902356258451068,0 +6.735080351388777,6 +6.7731126945291455,2 +6.400480311260973,5 +4.732138080266287,0 +5.049165950277201,0 +4.6647733435089584,7 +4.624783412830145,0 +6.5024580372600465,2 +4.627272198674834,7 +6.645363868686492,3 +6.751042323856252,6 +6.51111985345798,5 +6.333852721628196,2 +4.609642123320378,7 +6.594991773471231,3 +6.878649504019797,3 +6.806712243280641,6 +7.156634187757507,6 +6.701943608139835,1 +4.707173388556988,7 +5.0451397351512774,7 +6.4818417629515555,4 +6.786135751230505,4 +7.050229242196158,6 +6.555112142283533,2 +6.928178754008459,2 +4.806001451218146,7 +6.514877732247445,4 +6.452351976564043,2 +4.778554896985806,7 +6.51798685812305,4 +6.526290220484503,5 +6.913833832352378,4 +6.879516280967922,4 +6.930145339454475,4 +4.877088013042241,0 +4.598945061631151,7 +6.342474985037088,4 +6.904648665309053,4 +6.293000318118076,3 +6.63975482026757,6 +6.527955387644473,3 +6.816215075873159,3 +6.713273857431792,2 +6.658073381213166,3 +6.81064409164542,6 +6.921344886097386,2 +6.397977942125616,3 +6.831977048727669,6 +6.553330568820461,1 +6.562913150569115,6 +6.708146790865019,2 +4.744719445987243,7 +6.867621605351927,6 +4.712822617001235,7 +6.596809414930148,3 +6.863458753571094,6 +7.205518211587517,6 +4.753668627081319,0 +6.612750526405767,2 +4.743253140430196,7 +6.452242752442089,4 +6.759645934800799,6 +4.7179688779429245,0 +4.895529721760933,0 +7.093671617768086,6 +6.790074222447813,4 
+6.722563703473493,4 +6.5056469097177745,2 +6.717148078769022,1 +6.610660512665452,6 +6.51441649720159,4 +6.665153358117918,4 +6.969866331321373,1 +6.715169202053958,3 +4.634713700159363,0 +6.679126768291417,6 +6.511634505827729,1 +6.3366121708030025,4 +6.58380917629667,2 +4.7082623506159935,7 +6.908149571503203,6 +6.4955561910074735,5 +6.647220125538881,4 +6.914165805780529,4 +4.609283622207793,7 +6.567466303083183,2 +4.678539862768614,7 +6.604524545811169,5 +4.745245800139197,0 +6.689496432230957,6 +4.79834785016499,0 +6.574621106349144,2 +4.81625947162899,7 +7.004680615330818,6 +4.705007363600694,0 +6.371193666089834,2 +4.6229095218206115,7 +7.072257941642865,1 +6.751828350459519,5 +6.359758823795674,4 +6.780224713436874,4 +6.850018474086452,4 +6.743113018080438,1 +6.659362308662079,6 +6.746162151506149,4 +4.778188461536007,7 +6.482950317176041,2 +6.572182901017555,4 +6.977153035571581,4 +6.697080124595828,4 +6.573513082423481,1 +6.618945070824196,4 +6.829135520667372,4 +6.8060987556413695,4 +6.849524996477258,4 +4.767324608323771,7 +6.854707109417576,6 +7.233782869312423,4 +6.811932999502663,1 +6.3838253124720215,4 +6.744744457942768,4 +6.707861698970235,4 +6.948514652820179,6 +4.793535420438161,7 +6.795015799462305,6 +6.632809618564978,4 +6.598404135312052,1 +6.660777562252721,6 +6.508885275798235,4 +6.803155701199981,4 +6.60168826270381,2 +4.650293287390867,7 +6.763043351942853,3 +6.748515493435586,6 +6.584840840787549,4 +6.330616256304962,3 +6.713333297736223,6 +6.71213430490075,4 +6.983765131066388,4 +6.762085297544339,4 +6.623474823712336,4 +4.860505260286573,0 +6.824357209036116,6 +6.502199595488896,4 +6.618085024671698,5 +6.67162131615852,4 +6.867489949501145,4 +6.918745596757944,4 +6.8263717349551065,4 +6.743118366792812,4 +6.54738527629201,4 +6.678662336966622,4 +6.69106384053542,5 +6.5795483565914035,4 +4.672893731130056,0 +6.8442625830930925,6 +6.505836658516375,4 +6.782959708282549,4 +4.906104026610493,0 +6.859230706366937,6 +6.772420554854872,4 
+6.824617701942945,4 +6.745118310770669,4 +6.481227953485937,5 +6.822615382118041,4 +6.803450978581003,4 +6.843423589265219,6 +6.803796152413582,4 +6.315683550668924,3 +6.600180423481633,6 +6.7229388606649785,4 +6.856667450295595,4 +6.644828217246306,4 +4.625306499499098,7 +6.813687915496606,1 +5.002657444466438,0 +6.531789074920601,3 +6.686279646799915,6 +6.473282789512179,5 +6.770033862033054,6 +6.68321348039515,4 +6.948683028445439,4 +6.739134063818074,4 +6.760680799488955,4 +6.788198611894324,1 +6.605782836861434,4 +7.083871922749483,4 +6.71694987431531,4 +6.678477273251375,4 +6.12752277590832,3 +6.777523263542053,6 +6.588030901946514,5 +4.669723302865819,0 +6.477444485301134,5 +6.717531682293459,4 +6.726620170046388,4 +4.874530008658597,0 +6.725975907931507,4 +6.814856769645765,4 +6.774831938440361,4 +7.047879907427848,4 +6.817668129770284,4 +6.841854300440983,4 +6.954224132176878,4 +6.603980314282387,4 +6.629171307018236,2 +6.774128312585302,1 +6.667399564554367,6 +6.536587191451345,4 +6.992866143687788,4 +4.938750763762286,0 +6.958753458919474,6 +6.639058571981277,4 +6.591566948141841,3 +6.710067406355494,6 +6.641756878936272,4 +4.846915517524277,0 +6.780330559773491,6 +6.675916500094401,4 +6.563646257365943,3 +6.659848163684874,6 +6.366129059287656,3 +6.7981033758475,6 +6.472124455050735,4 +6.9031550011807425,4 +6.774868514364507,4 +6.850761337926462,4 +6.7991037293234236,4 +6.930997914084326,4 +6.772688069581671,4 +6.796037396954617,1 +6.694139203029599,6 +6.732263983723029,4 +6.800001659663365,4 +6.540540434710462,4 +6.868360915121768,4 +6.86103304112987,4 +6.766877108435066,4 +7.012244941780353,6 +6.652144105238907,4 +6.931726591861825,4 +6.631735903167242,4 +6.815519005045269,4 +6.767227046793539,4 +6.918116178026723,4 +6.735137681783495,4 +6.766754441182547,4 +6.821064368268166,4 +6.697734415791911,5 +4.823285004384703,0 +4.635148187203351,7 +6.5167628087597915,1 +6.654708962097049,5 +6.459693744071442,6 +4.716360959845202,0 +6.59870799070135,2 
+7.122237871861725,3 +6.482022375865232,5 +4.792696799280564,0 +6.9028384215843595,3 +6.622071166298342,4 +6.647557778769773,2 +4.827539079910877,0 +6.408018834230605,5 +4.873586593337281,7 +6.799114675492881,3 +6.94859773889915,2 +6.775840617556914,2 +4.650929559986124,7 +6.888086935134809,3 +6.576900916727333,5 +6.369338078558322,2 +6.863469879631604,3 +6.4215878588239494,5 +4.815908277361602,7 +6.78919335450056,4 +6.563879452677828,6 +7.167585554987423,4 +4.76165406615815,7 +4.977640245813002,7 +4.984224444749858,7 +6.980807088489624,3 +4.853467984449011,0 +6.777681759187582,3 +6.6007718041208685,2 +6.232754912802419,1 +6.706525606450281,4 +4.734387237001404,0 +4.873580393385768,0 +6.742246933345588,2 +4.688768434091538,7 +6.509917670001348,1 +4.659969158396473,7 +6.871499021538541,3 +6.631026010797037,2 +4.828262918517976,0 +6.531733537695761,5 +6.855594256526092,4 +6.401390743788791,2 +6.802760293899608,4 +6.528966346151733,2 +6.9588137281402815,2 +6.762834162053043,3 +7.065445038441534,3 +6.788811666306115,4 +6.356765022725453,2 +7.098901819304809,3 +6.596295800413591,4 +6.421233182056267,2 +6.905755653951364,2 +7.1545432629066665,3 +4.967911061119581,7 +6.421238876633122,2 +7.00490927915352,3 +7.189061956773233,3 +6.753106528538649,1 +6.768232784085998,5 +6.8552929284035935,4 +6.292864502155633,2 +7.187014172650353,3 +4.697638948566852,7 +6.80981804087512,5 +6.339188825955448,2 +6.769775371892573,6 +6.9390400433831,2 +4.548940801033496,7 +6.445896774816368,2 +4.675966628082102,7 +6.505039186749438,1 +6.5194940281500084,2 +4.631032580935827,7 +6.472272747757674,1 +6.707111567680889,5 +6.462200084115711,2 +6.859405916040709,3 +6.6803130580784,1 +6.820516375878397,4 +4.94375551256263,0 +6.609449519233942,2 +6.48396509906263,6 +6.67430390336361,2 +6.66274427855732,4 +7.2628840333251885,4 +6.396187355097056,5 +6.56494380540492,2 +6.782476325696469,4 +4.757921937187006,0 +6.672422512048272,2 +6.923534489030671,3 +6.752708552704308,2 +6.492685613184918,6 
+6.900255750512697,2 +6.723538853429134,6 +6.742138434927253,2 +6.415265609226573,1 +4.891964863064094,0 +6.612732046323518,2 +6.710555361844215,6 +6.4548617796856425,2 +7.010861834575822,2 +6.7164152265177774,2 +6.988566977177661,3 +6.404110826804224,5 +6.434090246551272,6 +6.860124580916383,6 +7.075268500079475,4 +6.557123738362252,2 +4.778420333741191,0 +6.558878407227328,6 +6.893733949237588,4 +6.542373408140746,2 +6.764820111434654,2 +7.078348428533176,3 +6.533149770339798,2 +4.775047235585551,0 +6.487570253013024,6 +6.733842443241194,2 +6.834490102492627,2 +6.708530269332377,2 +6.864687470793628,2 +6.840312267893454,2 +6.739205149542183,6 +6.5076516140486085,2 +6.500483745048138,5 +6.5788097184473795,6 +6.798921690449455,3 +6.483746625792875,6 +6.6666025207194455,2 +6.833434077352727,2 +6.766571567617903,2 +6.574868272257214,6 +6.636254017632267,2 +6.7608197038586315,2 +6.35529959627112,1 +6.407441379742723,6 +6.658717048724773,2 +6.81993284111706,2 +6.6132886614667745,6 +6.494193609314183,5 +6.954650572039939,5 +6.746468285761106,3 +6.923249542442416,2 +4.81423122968154,7 +6.332643033136488,2 +6.734012130967666,2 +6.768717160574915,2 +7.09094156916065,2 +6.3398400642558155,1 +4.701260614675717,7 +5.0412405107163485,7 +6.395984300359229,1 +4.876316714428048,7 +6.545411898265267,2 +7.035797295908074,2 +6.926666877437278,2 +6.482906223320097,1 +4.831004053965793,7 +6.312634170632582,2 +6.862501390651541,2 +6.433059036247722,6 +6.5232941143498335,2 +6.452163791563293,5 +6.793339557448164,5 +6.253338005863943,2 +7.093902451937369,2 +6.890757936963002,2 +6.586556900352675,2 +6.651465937562919,2 +6.608873804808014,2 +7.051623625590537,3 +7.217988858252487,3 +6.676806526959112,2 +6.706794351322843,2 +4.812032727388289,7 +6.646794858499694,1 +4.706991447338778,7 +6.3059423441327525,2 +4.881784556921243,0 +6.780861325785067,2 +6.586893491025596,2 +6.641988780762483,2 +6.621161253488288,4 +6.48100416692443,2 +6.692645387519087,2 +6.767453363161778,2 
+4.783431444843133,0 +4.879327369757625,0 +6.480475844747032,6 +6.6142075204833475,2 +6.952125943380504,2 +6.596347717997342,1 +4.857509526440352,7 +6.856207856153847,5 +4.90265082481163,7 +6.457431111423692,2 +6.974024390165155,2 +6.783533329417106,4 +6.561408173586828,1 +4.746922438426196,7 +6.710759687082876,2 +4.651763014567645,7 +6.5890845458451155,2 +6.785959377832711,6 +6.885859154950958,4 +6.604099536764682,2 +6.602713367408438,1 +4.585476566452445,7 +6.839290806153919,4 +6.459639639710905,2 +6.885126865535629,2 +6.868867489977678,2 +6.761800577997985,2 +7.059461788440483,3 +6.630486714958755,1 +4.836215096032388,7 +6.483358628945836,2 +6.643452724008156,2 +6.4170750169790916,1 +6.683912349383634,4 +6.497105455766894,2 +7.082570985190095,2 +6.873647200001074,2 +6.783719194952966,2 +6.636433846848785,2 +6.853966605978643,2 +6.903812839208045,2 +6.7381479224092145,2 +6.89953889067702,2 +6.807741195041877,2 +6.800351618590426,2 +6.945140404445634,2 +6.772511153055815,2 +6.828595666911729,2 +6.678958931383526,2 +6.664435687969451,2 +6.687281510410486,2 +6.8714480427734905,2 +6.909989779354218,3 +6.766627368536957,4 +6.483095440616445,2 +6.9965327633836605,2 +4.750256494981228,7 +6.390221779315762,2 +6.403316609009817,5 +6.466650853684245,2 +6.813965533765609,2 +6.878946366790471,2 +6.769350152249223,2 +6.612047217603581,2 +6.763969046940287,2 +6.8655225647142455,2 +6.874496032282726,2 +6.418827935219188,6 +6.532199123683955,5 +6.570624885342408,2 +6.717865900078263,2 +6.765124112136088,3 +6.625974956910331,2 +6.704411178707384,2 +6.829155035148023,2 +6.800219444717845,2 +6.963587517506126,2 +6.7583310870223725,2 +6.883944339243247,2 +4.772907503350589,0 +6.731609392681313,2 +6.786530127720396,2 +6.835999870083502,2 +7.012508952311648,3 +6.586349527422024,2 +6.779643366876816,2 +6.752710863033338,4 +6.418795534473724,2 +6.694621214310879,4 +6.411475641062907,2 +6.700688812286194,2 +6.868136607221391,2 +6.769557948453097,2 +6.908727067177682,2 
+4.686629878188014,7 +6.513922247053884,2 +6.564937630970087,2 +6.78805015524471,2 +6.781401240930659,2 +6.950073638071977,2 +6.818839585686646,2 +4.827305899962839,0 +6.625725206442119,2 +6.646622070222393,2 +6.870466810229721,2 +6.8948757433700685,2 +6.866622218716586,2 +6.847684360522711,2 +6.89662314038699,2 +7.272723851702416,2 +6.847180150132428,2 +6.630883188234954,2 +6.680487138826483,2 +6.548402428210816,1 +4.761890587728286,7 +6.56999685971989,2 +6.814115025133901,2 +7.075664524757548,2 +6.918658652245819,2 +6.31142200398225,5 +6.594928259008886,1 +4.688039162055762,7 +6.519604681350461,2 +6.725006441523548,2 +6.163111189599671,5 +6.396873046243297,2 +6.917376818442564,2 +6.7811375857116705,2 +6.795062382842575,2 +6.705639174037923,2 +6.833018169023959,2 +6.783308355660662,2 +6.670381685229752,2 +6.811268409520093,2 +6.709129591027025,2 +6.8376608884184815,2 +6.82402511105268,2 +6.985182237763901,2 +6.726436572881194,2 +7.115010654693477,3 +6.7446693431509175,2 +6.951562939265463,2 +6.621312964560535,2 +6.7143411157288675,2 +6.830855930819298,2 +6.649300525313295,2 +6.6930306651681315,1 +4.698618446407919,7 +4.635932410885211,0 +4.647017285341473,7 +6.636591189675829,2 +6.785216157740697,2 +6.823547362954864,2 +6.784595793657796,2 +6.912384525148694,2 +6.768469900660265,2 +6.899381314178442,2 +6.925565670119831,2 +6.811762840913272,2 +6.733789553340575,2 +6.701401985579347,2 +6.925250548737162,2 +6.654645668146807,2 +6.6685727336737814,1 +4.779400531373413,7 +6.627109127277059,2 +6.985863471449841,2 +6.817486530826296,2 +6.836012076080973,2 +6.637037748054982,6 +6.935166394681237,4 diff --git a/openAI_RRM/data_20190419/rrmagent_3112_3.csv b/openAI_RRM/data_20190419/rrmagent_3112_3.csv new file mode 100644 index 0000000..a3d7e98 --- /dev/null +++ b/openAI_RRM/data_20190419/rrmagent_3112_3.csv @@ -0,0 +1,649 @@ +6.887414075721828,2 +4.718094846278722,7 +6.745715373742166,3 +4.757561452404991,0 +6.580142200678387,4 +6.519030446950019,2 +6.606719365968726,4 
+6.831207870892592,2 +6.51466787135461,1 +6.67876258597696,1 +6.789291429980543,2 +7.0912923728200346,2 +6.551059674655531,6 +4.826524686892608,7 +6.9017521860488324,2 +6.514206435294882,1 +6.897927722646564,1 +6.55987973853171,3 +7.014662701014349,3 +6.502130353906492,6 +4.60373604273855,0 +6.654293467994039,5 +6.699415409992699,4 +6.844466084521565,4 +6.646575929069802,2 +6.45942718445601,4 +4.772038656673253,7 +6.64857935557543,1 +4.71420187097159,7 +6.423967452673019,4 +6.768124799325541,4 +6.870955558879764,5 +7.035669799797182,5 +6.313959650947179,6 +6.894295849082735,2 +6.762524778844077,3 +6.308210419194784,4 +4.680643893446482,7 +6.698401164014321,3 +6.974325922968385,2 +7.007210618384056,2 +7.169481754150559,2 +6.929177275760532,2 +4.6941802656065015,7 +4.798523080237986,0 +6.410273914520817,6 +6.230026241111211,1 +6.613315769373009,2 +4.689326824151603,0 +6.513566393273335,6 +6.8159433031063275,5 +6.8530040157804395,1 +6.730235059546604,2 +6.668082532903461,1 +6.54314173603967,4 +6.390870926647216,3 +6.885324765577585,3 +6.90850304211806,2 +6.577035839392649,6 +6.148857261474677,1 +6.195032409143339,6 +6.849234090386698,2 +4.863886358024637,0 +6.440542306935673,3 +7.100915002452575,2 +6.540359453399636,3 +6.563906955893377,1 +4.743101535452526,7 +6.902994118623044,2 +6.727867438504598,6 +4.892482183912744,7 +6.805385819832114,6 +6.48410190973385,1 +6.523927358521567,2 +6.553941528314474,4 +6.8309792013697095,2 +6.327743849529155,1 +6.863957220418823,2 +6.4574053784687155,4 +4.644487834632592,7 +6.928971912404324,5 +6.85837411521055,2 +7.28618092505268,2 +4.860219532828274,0 +6.971702766053291,2 +6.750181134223152,5 +4.918390064872268,7 +5.126304398134322,7 +6.701378988629157,2 +4.858943406500164,7 +7.035544168929763,2 +7.067066983479298,2 +6.5285969242258535,4 +6.580757159024011,6 +4.842042180694304,7 +4.589554056691207,0 +6.921040782598586,2 +6.851013105499092,3 +6.558296270877381,1 +6.430710665044835,6 +7.076190348771562,2 +7.189980402761332,2 
+7.077775890354153,2 +6.395484815075892,1 +6.593771967377323,3 +6.991932722122497,2 +7.060475266582605,2 +7.171012785435965,2 +4.933983464191646,0 +6.36878988903209,3 +6.286761549880828,4 +4.784691588113301,0 +6.95544945934303,2 +7.0713313181832165,2 +7.2729547136353245,2 +7.2930797692772105,2 +6.683729317991438,6 +6.832409643564744,5 +4.906196108368731,7 +6.731821652529674,2 +7.199192073778076,2 +4.655510075698499,0 +7.008771945947289,2 +4.785805347590032,7 +6.807180211792257,5 +6.86741783089598,1 +6.701331351656759,2 +6.637339131106578,5 +6.80546159147218,1 +4.745919665710972,7 +6.876547655371041,2 +7.027094813153536,2 +4.756896591452014,7 +6.575504963080581,3 +7.0014611424048185,2 +7.0418840645946545,2 +7.191248556944677,2 +7.02160977465606,2 +6.408702970557842,1 +6.898262747239636,2 +4.581495345151043,7 +6.908506879043952,2 +6.434161119317006,3 +6.947232462576488,2 +4.708960829833484,0 +7.08185820027288,2 +7.161783029333725,2 +7.191734434688489,2 +7.087128160415076,2 +4.762765317378505,0 +4.557241565839825,7 +6.666558866254259,4 +6.6640588558406595,6 +6.759878395519751,4 +6.626491993549795,1 +6.7623172664080045,2 +6.58259795568086,1 +6.804978108415213,2 +7.050914928709833,2 +4.641054885411157,0 +7.17810565746133,2 +7.2008244417404885,2 +6.840347572681952,2 +4.752326258748059,7 +6.6883686915662315,2 +6.6623615055393905,5 +6.459695545755975,6 +4.88868960128648,7 +6.9296522785329575,2 +6.652453553677391,1 +6.617495707793623,3 +6.887491828216331,2 +6.54781275937773,6 +6.401470417800427,1 +6.589735368202199,2 +7.209545406781348,2 +7.1727631328482815,2 +7.151875415633828,2 +6.803297939677086,3 +6.763196211195993,3 +6.401484217353945,4 +6.588074741760973,1 +6.486824422855505,4 +6.7356726519784464,1 +6.808877721452447,1 +6.79555275926726,2 +6.955482634559968,2 +6.8544088061047495,3 +6.300067201197185,4 +6.3089800913989835,1 +6.710099680718566,2 +6.3874049096477155,1 +6.760708356988134,2 +7.10087681517851,2 +7.235051297126325,2 +7.124104164650502,2 +7.411862412284919,2 
+7.029554151884315,2 +7.05042841133991,2 +7.211621281054053,2 +7.0258414478544795,2 +6.686087806509318,4 +6.44524825317387,1 +6.843843215432838,5 +6.873107405657101,1 +6.8915959211554565,2 +4.750748752487059,7 +6.655461383981394,1 +6.737142103085787,2 +7.136727054305774,2 +6.831624841618182,5 +6.34653933889665,3 +6.967350842850236,2 +7.102139897324151,2 +7.07879277822515,2 +7.257984096467356,2 +7.13750134518493,2 +7.012586586686245,2 +4.848102641637577,7 +6.695741122291507,2 +6.527036197764915,4 +6.240584443105967,1 +6.9267568417244005,2 +7.0848750742742785,2 +6.522376132697514,6 +6.306006032286709,1 +6.652265613018879,2 +7.119636205910694,2 +7.344159422231172,2 +7.132749950795036,2 +7.294927924500635,2 +7.2265954716171485,2 +4.754309025762997,0 +6.938898417208357,2 +7.145289147284758,2 +7.034522718323233,2 +6.885001323494666,2 +7.012511403566747,2 +6.516682549608479,4 +6.744503531856503,1 +4.869438342697354,0 +4.54457178167707,7 +6.867376728999767,2 +6.857255678300523,3 +7.03104723694376,2 +4.783039609375335,7 +6.55283075986969,1 +6.788698211543853,2 +6.9644160398801045,2 +6.5329963993211475,4 +6.762819837565891,1 +6.67675829775959,2 +7.149672119789429,2 +7.166402790973037,2 +7.2095002738287,2 +4.744921317781666,7 +6.871047955232303,2 +6.883542161014978,2 +7.145426983242385,2 +6.674902854788758,1 +6.671413059301875,2 +7.137441007206579,2 +7.104653527945069,2 +7.1586702833288225,2 +7.226731928221141,2 +6.404613549126704,4 +6.4287243026805285,1 +6.814283932005976,2 +7.112815367268273,2 +6.995124430932497,2 +7.039095946181671,2 +7.116901448229564,2 +7.0746564658414375,2 +6.85944082977186,2 +6.785826930211253,6 +4.7585535074022935,0 +4.449516066206918,7 +6.695981744924542,3 +7.075410937775038,2 +7.012997808823056,2 +7.030710192771588,2 +7.276335492370618,2 +6.9411753711461674,2 +7.229854604248237,2 +7.247507146300592,2 +6.744714674149321,6 +6.311458986015041,1 +6.711572225412712,2 +7.034969345311512,2 +7.123690536001078,2 +7.184111400254236,2 +7.136467748818542,2 
+7.086570537325417,2 +6.973140423321154,2 +4.768528091354929,7 +6.429090653647949,1 +6.5476688917806,2 +7.229140629557616,2 +6.601749684618846,5 +4.775147451343484,0 +6.941425686739179,2 +7.2639514002462215,2 +6.833188052901727,5 +6.610028266819968,1 +6.92220776713778,2 +6.8364723797842055,2 +6.8871656711361045,2 +7.153710964443773,2 +6.702590816177261,6 +6.296441486450073,1 +6.7814177573376,2 +7.146117305233009,2 +7.050018378015808,2 +6.6571972718864405,3 +6.933916825893602,2 +7.487437857685921,2 +7.205937020518502,2 +7.250600300183149,2 +7.207645994870185,2 +7.019357343671249,2 +6.692901929110092,5 +6.502959357976431,6 +6.300180069651586,1 +6.74340823806821,2 +7.222159722217586,2 +7.0982792565901365,2 +6.979675397039113,2 +7.135653121986886,2 +6.964618525012605,2 +7.1565351968892665,2 +7.132309776258849,2 +7.153447707263973,2 +7.165405336449491,2 +7.098456054373325,2 +6.452088082615827,6 +6.606857042838057,3 +7.034366456663312,2 +7.110459937903316,2 +7.166913030154357,2 +7.083134030021662,2 +7.228830376187485,2 +7.042685242551297,2 +6.998790417684559,2 +7.014252053659015,2 +7.085371144596009,2 +7.170040841556342,2 +7.047653465505704,2 +7.187819352423159,2 +4.7138783092045236,0 +6.883527361595422,2 +7.2116417840271,2 +7.071026345145955,2 +7.185935230284382,2 +6.972745648723549,2 +6.795626667462495,5 +6.668103920988512,1 +6.981959412831372,2 +7.109778174221166,2 +6.911523114830347,2 +7.086308331834429,2 +6.624731098711953,4 +6.558135300071395,1 +6.737065600487769,2 +6.9865672821677665,2 +7.121623230363212,2 +6.647210108314044,4 +6.509722897539493,1 +6.882841989421272,2 +7.191812899309835,2 +7.092283542225987,2 +7.205291086483647,2 +7.225212028848427,2 +6.924731637570205,2 +7.049846155840826,2 +7.111082019439738,2 +7.071725794661011,2 +7.221904682733002,2 +7.086552441590457,2 +7.064318658066902,2 +7.261929611836799,2 +7.148395570532595,2 +6.58001101905215,1 +6.952663011294758,2 +6.989779059970114,2 +7.156297858101745,2 +7.143762122706454,2 +7.146652290568226,2 
+6.3567426354901295,4 +6.705227940705013,2 +7.001567990950858,2 +7.010520335493877,2 +6.9536584032521995,2 +4.857992361773129,7 +6.906054152817012,2 +7.10422946039795,2 +6.997740910221751,2 +6.5962035140541575,6 +6.529547269864857,1 +6.861066210386174,2 +7.135333876072216,2 +6.9577005211402785,2 +6.560431096630726,3 +6.783843385856799,2 +6.512911377233959,4 +6.541127113210209,2 +6.998871896281996,2 +6.33018844402624,1 +6.852180334621885,2 +7.172757833096405,2 +7.271656412612685,2 +7.182653047407584,2 +7.068339331753437,2 +7.119377698094983,2 +7.060226415307908,2 +7.064161604936249,2 +7.174046688032874,2 +6.911041779570373,2 +4.706855327698724,7 +6.902371952593784,2 +7.1739124981341895,2 +4.8759044367369855,0 +6.904076226094892,2 +6.89051914472916,2 +7.0012621119824034,2 +6.51053883717375,1 +6.826065130669118,2 +6.513920420344528,1 +6.9231677046411315,2 +7.125494844972464,2 +7.169116483830683,2 +6.527833355483084,6 +6.832403286938247,2 +7.251195421985401,2 +7.211289303278462,2 +6.829431895738969,2 +7.057265538395093,2 +7.0778022991581695,2 +7.174677987979819,2 +7.08773268438678,2 +7.008074278448999,2 +7.160141068056018,2 +7.079424937157954,2 +7.017435778298737,2 +7.19429781071124,2 +6.792218787058258,2 +7.123032376723284,2 +7.014074174796017,2 +6.9956849187503005,2 +6.4356397913456345,1 +6.769603086555082,2 +7.081156749730198,2 +6.879831526457943,2 +7.0791632164523595,2 +7.007860130893554,2 +6.816001437884218,2 +7.23623371951579,2 +6.974629442979263,2 +6.973942472510182,2 +7.062825608877885,2 +4.682139883647961,0 +6.788974327610149,2 +6.880302641534022,2 +7.090699070855067,2 +7.096827166494079,2 +7.1047363337881855,2 +7.193828279733114,2 +7.13960758937641,2 +6.847858110743724,2 +7.08245037911488,2 +7.058716105600585,2 +7.022285441101827,2 +6.9967933416183135,2 +6.674473982795243,3 +6.92275015527668,2 +7.0641844134202865,2 +6.977197476849362,2 +7.110859648621506,2 +7.068556290240374,2 +7.012571703384504,2 +7.054728587000559,2 +6.917926384133066,2 +7.014963523796807,2 
+7.321234164433157,2 +7.167252492185729,2 +6.4156639721861355,1 +6.824639500503156,2 +7.266777501358706,2 +6.863087634620158,2 +7.0116303083174865,2 +4.917523463739941,0 +6.876802314216125,2 +7.351225056688073,2 +7.515833917020921,2 +7.097786484934793,2 +6.993336754637654,2 +7.0127723068664265,2 +7.135369157973508,2 +7.042094324700373,2 +6.887045801961919,2 +7.110962565526885,2 +6.979036225463545,2 +7.094723017804269,2 +7.157221512820721,2 +6.974431715662656,2 +6.443214487537616,3 +6.95112343302025,2 +7.35741941738186,2 +6.897533866648619,2 +6.981641844172393,2 +6.861378191753909,2 +7.096689840960232,2 +6.626306456526658,3 +6.84036176519341,2 +7.002164684105142,2 +7.023943295328678,2 +7.177482230528363,2 +6.868978064844113,2 +6.897396262881111,2 +7.231673727188659,2 +7.010476194243184,2 +7.097899033827731,2 +6.854420846718951,2 +7.059851741412765,2 +7.207930161005869,2 +7.077231577443846,2 +7.010776581603543,2 +6.5007482067674225,3 +7.086983717963149,2 +6.906241577418293,2 +7.181472349202002,2 +7.216338941639041,2 +7.0408553437840125,2 +7.0228470472970095,2 +6.820820097403597,2 +7.104966151194603,2 +7.0896842121902885,2 +7.1166877781552085,2 +7.097171988761735,2 +7.049877617745723,2 +7.062109766798581,2 +7.057720256636125,2 +6.957797762511305,2 +7.2622534825267575,2 +6.86222943440757,2 +6.943569443952972,2 +7.084878014586918,2 +6.850975768546169,2 +7.200819837050831,2 +6.996716236183808,2 +7.078408529625916,2 +7.244771270725372,2 +7.107736082974409,2 +7.274714987083584,2 +7.136973165545357,2 +6.603548469687574,1 +7.055498014524659,5 +6.789053727978237,2 +7.0586944875882836,2 +6.85064405416892,2 +7.206814835189716,2 +6.978608139597854,2 +7.226254050639136,2 +7.118182949181383,2 +7.0983041006882175,2 +7.085803373697984,2 +7.165458464281373,2 +7.148349586553211,2 +7.161628727324905,2 +7.178358433974093,2 +6.988503297405536,2 +7.079338759677325,2 +7.142110376384156,2 +7.005709544909307,2 +7.071497358779302,2 +7.2224903095786095,2 +6.960133136711547,2 
+7.256393508006335,2 +7.1326919106058515,2 +4.923022742970568,0 +6.876454442158703,2 +7.1480359548070584,2 +7.2141627317983135,2 +6.897990424703015,2 +7.076121453421546,2 +6.98211539729399,2 +7.001768770634957,2 +7.2359458455695345,2 +7.304864500158633,2 +6.969735230968,2 +7.088957853931972,2 +7.248387701454592,2 +7.126834625006146,2 +7.08992949317426,2 +7.116092354473977,2 +7.205832621218591,2 +7.067802646176762,2 +7.097805945053139,2 +6.828896722383232,2 +7.109948673222116,2 +7.189584087050889,2 +7.241571803461217,2 +7.032439054029599,2 +7.147023361277949,2 +6.870970418266907,2 +7.226900249876181,2 +6.913065107547596,2 +6.9951054132625154,2 +7.109313050947658,2 +6.998822008971463,2 +6.9854538243012465,2 +7.046426332056661,2 +7.291565998460805,2 +6.811100902166013,2 +7.008431224790929,2 +7.093190605577955,2 +7.045317879433544,2 +7.159247779879753,2 +6.799417190751053,2 +6.971244415269992,2 +7.171617878570086,2 +7.170062259126816,2 +6.995099912184919,2 +7.093345865065735,2 +7.111165030141072,2 +7.247631117834421,2 +7.079055958869053,2 +7.215430628394277,2 +7.214125378164659,2 +6.988167698532716,2 +7.170444062581934,2 +7.098947817399639,2 +7.243233925001257,2 +7.029930427405382,2 +7.180492203367446,2 +7.2115652681525315,2 +7.199516088117404,2 +7.149843501929368,2 +7.225551840974223,2 +6.830932997963846,2 +7.173364018235571,2 +7.12555224859319,2 +6.940204221896011,2 +7.030163993619739,2 +7.04151347995633,2 +7.18284390348669,2 +7.234418831513361,2 +4.741235987742794,0 +6.690727247292909,2 +6.987032740160663,2 diff --git a/openAI_RRM/data_20190419/rrmagent_3222_1.csv b/openAI_RRM/data_20190419/rrmagent_3222_1.csv new file mode 100644 index 0000000..91112f6 --- /dev/null +++ b/openAI_RRM/data_20190419/rrmagent_3222_1.csv @@ -0,0 +1,410 @@ +9.812701874373218,3 +8.491455891586568,1 +8.552735099464131,1 +10.480649320172567,3 +8.353748795705739,1 +6.713028227923214,7 +6.804499909640788,0 +10.534349996101161,4 +10.058300640839603,3 +10.822875600023096,3 +7.9171903705650255,5 
+8.63508371836414,5 +8.3961718089497,1 +10.407205257069606,3 +8.436113213311248,2 +8.71941977133064,2 +8.370296301279309,2 +8.147990830291437,1 +6.630126368940135,7 +8.352385203896981,5 +10.140343417887541,3 +8.273216494003172,5 +8.32597695807032,6 +10.566939197551829,4 +9.873022866863991,3 +8.401251922058343,6 +8.090871352727527,1 +10.180787514809582,3 +8.143780434902322,2 +8.458654751644254,6 +6.841444037687499,0 +8.070431644614857,6 +8.094247136421911,1 +10.473070383720472,3 +7.063550298237454,7 +6.5748915640044245,0 +10.13185971497084,3 +6.894477952687428,7 +10.189379882407454,4 +8.267066670476751,1 +8.54376819029258,1 +10.441426946156616,3 +10.629030259779706,3 +8.520580716189295,1 +8.270433660347884,5 +6.5737285550335285,0 +10.399260064347482,4 +6.832878535810282,7 +7.175321025668087,7 +8.3006118372142,6 +8.50996559136298,2 +8.473300498139965,6 +7.925314486912709,1 +7.707609953305794,6 +6.607454025576688,0 +10.046695922410825,3 +8.11017718063051,6 +6.930185361617012,7 +7.035241479495859,7 +10.63368026099255,3 +10.661188661074862,3 +6.645326723198645,0 +10.122629930296938,3 +8.213103587243419,5 +10.601620178071082,4 +8.270869100052874,2 +8.09952851682838,6 +6.73226653634534,0 +9.992368142142649,3 +8.293966967008387,1 +10.491109159575645,3 +10.422720595857514,3 +8.172186072473867,6 +7.983178851762265,5 +10.405710418039696,4 +8.139248364573673,2 +6.612588299887981,7 +8.354084586890494,5 +10.470226734520988,4 +8.082329977811685,1 +6.793634122863746,7 +10.379388355572543,3 +10.404260361077162,3 +8.40122041576632,1 +10.232727412727126,3 +10.716893637695778,3 +8.26858348921157,2 +8.614508922859896,2 +10.42862818703155,3 +8.61851845453594,2 +10.47422562769693,3 +8.176612516671609,6 +8.083117814079406,1 +8.487562996678355,5 +10.678794628969264,4 +8.208322633043222,1 +10.36929075097516,3 +10.802278496211496,3 +6.925025890274648,7 +10.383218150649821,3 +8.419668207007518,1 +8.614639861371119,1 +8.515515051033775,1 +6.826114474996304,0 +8.456395220452896,2 
+10.377957932565385,3 +8.559999667610386,1 +8.038153939819617,6 +7.943851607502202,1 +10.450706061357417,4 +6.555885181109118,7 +10.388919409154425,3 +6.850897263969735,7 +10.324309930242991,3 +10.674415440918478,3 +10.555221340355223,3 +10.496971956161351,3 +6.859738763894373,0 +6.595300970501708,7 +10.401974561479532,3 +9.88508540164419,4 +7.991846214223993,1 +10.424255132486753,3 +10.560130013445312,3 +7.0494083411891015,7 +10.091908409113627,4 +8.242666310087218,1 +10.216820003590433,3 +8.293862437008212,1 +8.680742332473965,1 +10.565012975376812,3 +10.729047300547636,3 +10.774728451473253,3 +8.01183033625074,6 +8.12416095719434,1 +8.722390416078998,1 +8.388866670620052,5 +6.915604440502616,7 +8.28430896211517,6 +7.984530679854959,1 +8.42004052433898,5 +8.459714253582497,5 +7.391201056736812,7 +10.222913231810201,3 +10.67948772308116,3 +10.693990434444867,3 +10.462514644794803,3 +10.705410740156347,3 +10.701641569150567,3 +8.290150239528934,2 +10.581536980034361,3 +8.504384662250185,1 +10.525265872916329,3 +10.510327794202112,3 +8.572781683212142,1 +8.113515410606592,6 +8.001731951095948,1 +6.893966888715342,0 +6.6743275243755775,7 +6.695838671996895,0 +10.255220458922572,3 +10.583666198449668,3 +6.879734044133519,0 +10.102698525438592,3 +6.69705076655632,0 +10.117571628821503,3 +8.14167048381494,5 +10.295211651827099,3 +10.654029180865681,3 +10.77065788928542,3 +10.690682793224322,3 +9.878706958353876,4 +8.297585886740853,1 +8.570384805914466,1 +10.441559407320524,3 +10.714132568283262,3 +8.204023712258573,1 +6.739683701312867,7 +8.406192387126527,5 +10.254056331330977,3 +10.41653156355472,3 +10.360023111919716,3 +10.684871346619333,3 +10.50854981592191,3 +10.859092906626884,3 +10.433712841681743,3 +10.54630573288137,3 +6.664241809342063,0 +8.061874592096977,5 +10.219106403033388,3 +10.666443426992437,3 +10.526119060990172,3 +10.693862616246916,3 +6.74346258825814,7 +6.683921669422815,0 +8.27360700410578,2 +10.406009175070611,3 +10.532626395986274,3 
+10.535234768223978,3 +10.556692814193914,3 +9.823833027682646,4 +8.20759446595856,1 +10.54525936983597,3 +10.640774493468534,3 +10.716544215316713,3 +8.246078711104099,5 +10.180935292720422,3 +10.761046934108089,3 +10.409252852577334,3 +10.574424773663523,3 +10.238918267110305,3 +10.654794649137507,3 +10.563045147068518,3 +7.074716826647925,7 +10.314161162618067,3 +10.678025350044567,3 +10.760028263331359,3 +10.701908105341763,3 +8.074033504959882,6 +8.146765147522075,1 +10.360311053452575,3 +10.86639640874286,3 +10.821817106286277,3 +10.684215451412012,3 +10.525650586565378,3 +10.477642206289628,3 +10.399167782626385,3 +10.782317322860878,3 +8.231248772125598,1 +10.503103512076995,3 +7.020854977739524,7 +10.53648554124922,3 +10.665571381027666,3 +6.800087598825165,0 +10.209039907849863,3 +10.620009615905662,3 +10.820212546652904,3 +10.661710378011083,3 +10.598900921137384,3 +8.305465603796193,2 +10.32503815236893,3 +10.784097226371864,3 +10.429315726050572,3 +10.860116967366395,3 +10.79121992370331,3 +10.65945631556831,3 +10.559855370558722,3 +10.793016933732831,3 +6.843900953145404,0 +10.661961839571918,4 +10.683109153982791,4 +8.244566352439437,2 +10.371267166634231,3 +6.611447422576799,0 +10.130366362805857,3 +10.82054146344202,3 +10.636119701134051,3 +8.102889335627506,6 +8.086529097641343,1 +10.401652753587982,3 +10.581577416309893,3 +10.583143349577139,3 +6.704469529380542,0 +10.32329367625126,3 +10.774268853695371,3 +10.805329288130471,3 +8.122697278104967,6 +8.147396020004246,1 +10.361793810870536,3 +8.334890131861648,2 +10.404030572484361,3 +10.563442907679729,3 +10.737500087500873,3 +10.944184972280238,3 +10.826118172618504,3 +10.811499142982013,3 +10.45387399448265,3 +10.796078331441976,3 +8.24383813213128,6 +8.070625893703273,1 +10.41876359881867,3 +10.744096638482775,3 +10.460191049357002,3 +10.613465463759434,3 +10.71437586331135,3 +10.794853591738415,3 +8.45704063854754,2 +10.305951387942565,3 +6.726406422877,0 +8.444458175085828,1 
+10.32472728327333,3 +6.844659317293802,0 +10.260745630033036,3 +10.865728023255887,3 +10.735435346014684,3 +6.854949131223283,7 +10.270628494957597,4 +8.240667372602779,1 +10.228753996246455,3 +10.6185979111892,3 +8.523447560454922,2 +8.396439434086604,6 +8.107804648422546,1 +10.501143322829627,3 +10.77085621905009,3 +10.518435870187831,3 +10.538781631912705,3 +10.728776063775552,3 +10.504603046065572,3 +10.870111064520835,3 +10.397706290884162,3 +10.64507931543049,3 +10.732523113306309,3 +10.852851765245909,3 +10.49472780829358,3 +10.874041423518108,3 +10.68838630891914,3 +10.663640148159308,3 +10.692974755691875,3 +10.636026267027566,3 +10.829006335476635,3 +10.533321562996678,3 +10.871124248604767,3 +10.769978607008918,3 +10.62481194989674,3 +10.504407227967636,3 +10.774172163537056,3 +10.623982188126941,3 +10.680470777581942,3 +10.868514055449024,3 +10.692578127467433,3 +10.662687571214384,3 +6.870025979783712,7 +8.460828807919825,2 +10.283668939249525,3 +10.42957251656412,3 +10.479326684107553,3 +10.82225151154034,3 +10.978286499484458,3 +10.250494305227312,3 +10.716519561011538,3 +10.436385334623536,3 +10.950988058274676,3 +10.7349656378842,3 +10.700080817449606,3 +10.614119382026644,3 +10.781345707331845,3 +10.506442721749641,3 +10.729049255060614,3 +10.68260534899274,3 +10.638141181317854,3 +10.611250653799207,3 +10.63950529583829,3 +10.710778503185624,3 +10.522868648257338,3 +10.365841250212066,3 +10.616292770658797,3 +10.678275161818208,3 +10.620280884016653,3 +10.78373277977204,3 +10.556204835685458,3 +10.563072429180442,3 +10.563865658885257,3 +10.41314458150014,3 +10.562002991571605,3 +10.83711962167629,3 +10.61460256587744,3 +10.276113872122211,3 +10.49668934300221,3 +10.376928871949707,3 +10.730024033860438,3 +10.73294748867524,3 +10.841449903382742,3 +10.508119745008633,3 +10.930783493712758,3 +10.551538245522213,3 +10.604884282156522,3 +10.537186589694347,3 +10.491944696853484,3 +10.636403006501219,3 +10.85891261143617,3 +10.547434032242563,3 
+10.880271006072336,3 +10.57192601999418,3 +8.35904841734335,1 +10.419178710140704,3 +10.827774039939106,3 +10.767254690438127,3 +10.511086585582015,3 +10.643271696597566,3 +10.963759650587118,3 +10.526410773747479,3 +10.85761047841791,3 +10.341939876850887,3 +10.659125684960356,3 +10.553251677815094,3 +10.626926152055082,3 +10.617790323825753,3 +10.520468119191543,3 +10.604419650003935,3 +10.577072164852877,3 +10.620389333640718,3 +10.518178627368242,3 +10.751185357949021,3 +10.52210937461138,3 +10.795065220439556,3 +10.480946367937982,3 +10.504561963181349,3 +10.752860095835631,3 +6.963297769168678,7 diff --git a/openAI_RRM/data_20190419/rrmagent_3222_2.csv b/openAI_RRM/data_20190419/rrmagent_3222_2.csv new file mode 100644 index 0000000..ca7e49e --- /dev/null +++ b/openAI_RRM/data_20190419/rrmagent_3222_2.csv @@ -0,0 +1,361 @@ +7.921791175358099,1 +10.304165827910355,3 +8.39562033297661,5 +6.806042332939603,7 +8.471994622784736,6 +7.940285619275252,1 +8.133778014799539,2 +6.889915414023765,0 +7.274105856323,0 +8.231520140996873,2 +10.4187076735278,3 +6.83541021221711,0 +10.013617219870214,3 +8.35369843076278,5 +10.321704879310147,3 +8.297310819692164,1 +6.869537743102318,7 +8.361867359116928,5 +9.96891807780947,3 +7.890116991649993,5 +7.984244664377951,2 +6.82383120371731,0 +10.50248746833308,3 +10.14725185055538,4 +8.16996437889015,5 +8.049759219401663,6 +10.562454083293915,4 +8.149860037348647,2 +8.318715738475897,6 +6.715315258331027,0 +10.102063676649028,3 +6.86452634385401,0 +8.185814487961585,5 +10.341749941388287,4 +8.28815570600751,2 +10.656719761300584,3 +8.467878846657632,2 +8.493943022679662,2 +8.586843267649748,2 +10.089780073455353,4 +6.631916892747119,7 +6.628778047542292,0 +6.6391491764520305,7 +8.421736869407379,6 +10.388297341894912,4 +8.281438939794752,5 +8.22079473770334,1 +8.48416886392121,1 +6.562555758830719,7 +10.226378258249122,4 +10.575297996209676,4 +10.757346723531395,4 +6.9179427464867915,0 +8.131432506340552,5 +10.052682405727825,4 
+11.04509087801626,4 +6.580319249925055,7 +7.0402276957925025,7 +6.935978743098107,7 +8.504593371791444,6 +10.393292164262292,4 +8.375479506102721,6 +10.492133166839785,4 +6.880569001403588,0 +10.385219356498776,4 +10.656737420291428,4 +8.276383063481218,5 +10.334060298685214,4 +6.840412843615737,7 +10.110501976430363,4 +9.970711329337453,3 +6.977856603071969,7 +10.113319345109645,4 +7.127824038542501,0 +10.370713905697333,4 +6.743213850057416,7 +10.127264198892442,4 +9.905100869546223,3 +10.759669787547896,3 +10.017106949205742,4 +6.7231491117485325,7 +8.227841474311742,5 +8.06372911343728,2 +8.203767013998842,1 +8.523994765384924,5 +8.322431201366088,1 +6.84001030060437,0 +10.351519803946857,4 +6.760750392526594,7 +7.102422768900895,7 +8.444606012265071,1 +8.23472709538929,2 +8.736483487497882,2 +10.25730627103109,4 +8.306487361039348,6 +8.263451325453314,5 +8.058435980008623,6 +7.979551563119421,1 +9.976218041734416,4 +8.361821958518465,6 +8.433869774008674,2 +10.316676055887003,4 +8.210969768560897,1 +9.945394600284063,4 +10.81358081878722,4 +10.537495590326598,4 +8.360146577889655,1 +10.166979730165071,4 +10.623345182344577,4 +10.699204683423511,4 +8.211815333013147,1 +6.876819539601698,0 +7.139445782945478,0 +8.402242134200392,5 +10.582736506063146,4 +10.015358018167365,3 +10.610122136717237,3 +10.753741549152885,3 +9.968078851770091,4 +6.740074038445991,7 +10.108440685310063,4 +10.780359006903433,4 +10.617449938221938,4 +10.522386488250222,4 +10.695646717588174,4 +10.795995050486903,4 +10.448976432164626,4 +8.347600847902761,6 +8.128830329780243,5 +10.390549593549258,4 +10.815511629288258,4 +10.901188917956734,4 +8.214591790511738,2 +10.276998679774575,4 +10.542101993602142,4 +10.567444895185783,4 +8.449601203457963,5 +8.571402432138065,5 +10.45415200914226,4 +6.700146251589285,7 +10.29370856373758,4 +8.207921083246145,5 +8.040150234780045,2 +10.192465525047862,4 +10.788275442731361,4 +6.972900124552566,0 +6.823230133334615,0 +10.49399709843417,4 
+10.831871843741919,4 +10.834310514906711,4 +9.764358471933512,3 +8.479919428613059,1 +10.128596031752705,4 +8.44299468454254,6 +8.576121455796107,6 +10.343086806953348,4 +8.531085210119373,5 +6.903726415068606,7 +10.123116453305386,4 +10.711156141581819,4 +6.70898418443759,7 +10.318681262785676,4 +6.98026955160909,0 +8.133368117187675,5 +10.560032026068145,4 +8.356076427309556,6 +6.746595189300984,0 +10.206963118131528,3 +8.111193241670362,6 +10.579087579228204,4 +10.759759859684744,4 +8.093263093875226,2 +10.134693361622169,4 +10.673186301419506,4 +10.692128162916594,4 +10.740112862079208,4 +10.651749624113455,4 +10.533908583698272,4 +6.97326238919746,0 +10.5869175131808,4 +10.496245576851596,4 +8.205028170139972,2 +6.913810161308469,0 +10.327806685778576,4 +10.540033263859268,4 +8.57242110963552,5 +8.28230146341211,1 +10.414651367465117,4 +6.6739431970834735,7 +10.41740922082782,4 +8.378834984453068,2 +10.249831112866003,4 +8.29621120758886,6 +10.6600155398745,4 +8.461750142253807,6 +10.60099659816588,4 +10.598566405854577,4 +10.81826016109528,4 +9.962074616366316,3 +10.812000982150119,3 +10.202675587905514,4 +9.916660289226453,3 +10.027077782738962,4 +10.701382470657862,4 +9.670453926898375,3 +10.191578665798861,4 +8.283912315791426,5 +10.323489091229396,4 +10.558941702818505,4 +10.754114204468536,4 +10.595555276625278,4 +8.41999096119526,5 +8.015367020106583,2 +8.34481970518646,1 +10.341513269692436,4 +10.614200145294024,4 +10.812670525914497,4 +10.646479619991908,4 +8.445063981506536,5 +10.473666276064085,4 +10.406700548334626,4 +10.676856394753306,4 +10.421578608307733,4 +10.600105198633539,4 +10.627709526754309,4 +10.657621266698227,4 +6.590637751788756,7 +10.002362010167507,4 +10.469636681752474,4 +10.670811717225106,4 +10.583982223131246,4 +10.526292704286968,4 +10.685928580924447,4 +8.087776219234772,1 +10.198310397777622,4 +10.366905842792628,4 +8.472965800844936,5 +6.76594646791972,0 +10.465876267383202,4 +8.421340802595633,5 +10.381768165316752,4 
+10.869488048909334,4 +10.820496142689413,4 +10.424169334622317,4 +6.606043494574032,7 +10.2297323903012,4 +10.684746816615844,4 +10.589684148298065,4 +10.423482439778809,4 +6.721666779326603,7 +10.417400671012496,3 +9.900028265339596,4 +10.62901517799361,4 +10.691163935588,4 +10.62076235181315,4 +10.457922092951087,4 +10.810615624249783,4 +10.516023428208205,4 +10.654152652253766,4 +10.563232835324534,4 +10.547239558138545,4 +8.088488869498496,2 +10.29059088744203,4 +10.52970272492916,4 +10.781346475403483,4 +10.5601864582996,4 +10.067886084132441,3 +10.082710193557311,4 +10.338081062450096,4 +10.503185800293618,4 +10.887121436525039,4 +8.4704465968209,2 +8.593868506985146,2 +10.152533479480292,4 +10.590368627189283,4 +10.606226552663204,4 +10.643616226095153,4 +10.570882939900585,4 +10.504221102345607,4 +10.662236161332334,4 +10.482982643098742,4 +8.326260336616595,2 +8.357060347079871,6 +10.398060421665367,4 +10.859239149360222,4 +9.872996262715676,3 +10.211238150212333,4 +10.501322009659319,4 +10.700849348386145,4 +10.61395384128937,4 +10.522138490419811,4 +10.71448302194948,4 +10.687240273817906,4 +10.794757044202655,4 +10.496350562403284,4 +10.621792500770624,4 +10.775945709142945,4 +10.871918877180955,4 +10.686900829095784,4 +10.783380775391203,4 +10.540709219758325,4 +10.435221787445526,4 +10.564393760184002,4 +10.44663192954248,4 +10.73417124035175,4 +10.589426130450011,4 +6.7831536390560085,7 +10.336489975670693,4 +10.742878337178318,4 +10.654721653970851,4 +10.573116435386305,4 +10.845146343710452,4 +10.33375638921193,4 +10.691169422886043,4 +6.905838895593954,0 +10.30873698143784,4 +10.6542312281361,4 +10.415658499107002,4 +10.402172229152173,4 +10.735553674155028,4 +10.663612560776357,4 +10.916200776907061,4 +10.399687627160182,4 +10.5856126680844,4 +8.414370476917933,6 +10.55725550739967,4 +10.429224861310125,4 +10.53214248524994,4 +10.513260561260884,4 +10.821590952833493,4 +8.361890749256249,6 +10.235695519676693,4 +10.70517818494561,4 
+10.723088206772088,4 +10.73099161965368,4 +8.270458279444886,1 +10.16812904751264,4 +10.659481212184167,4 +10.49609247782513,4 +10.532693785763605,4 +10.574179785359448,4 +10.70827522242708,4 +10.596201771767007,4 +10.38444381697015,4 +10.594763352796685,4 +8.21906778296719,2 +10.370504095140278,4 +10.52989006153881,4 +10.438146568279988,4 +10.479097941147533,4 +10.523532564912877,4 +10.63717520444291,4 +10.72329615880591,4 +10.829484644803717,4 +10.612654118076511,4 +10.643477964938324,4 +10.734817400392043,4 +8.440177242208039,5 +10.208335404272496,4 +10.710993346220818,4 +10.484255874289232,4 diff --git a/openAI_RRM/data_20190419/thomagent_3112.csv b/openAI_RRM/data_20190419/thomagent_3112.csv new file mode 100644 index 0000000..b74fab6 --- /dev/null +++ b/openAI_RRM/data_20190419/thomagent_3112.csv @@ -0,0 +1,630 @@ +4726.583770948741,0 +6506.01903752525,4 +6819.732848167426,5 +4665.3073948909505,0 +6520.880148479449,3 +7090.144009559101,1 +6815.772171355295,3 +6587.935526680192,5 +6790.330656799089,6 +6510.460427625048,2 +4635.488560796568,7 +6418.3345936832175,2 +4857.369464054169,0 +6902.88710268121,6 +6578.347668208352,3 +6154.052959977751,4 +6236.622211158789,3 +4875.5351192833505,7 +6292.765368571195,4 +4661.228353301766,7 +6692.739126142189,1 +6450.466157309237,4 +6574.539527291648,1 +6573.263200758867,2 +6855.893623854546,6 +4747.836474940563,7 +7091.686481823985,1 +6642.188664534659,3 +6550.501914945401,2 +4730.2447125279405,7 +6558.402532414399,4 +6480.008909957718,3 +6999.899940196131,1 +6512.001748980701,2 +6932.01451630692,2 +6768.602089446488,2 +6724.185296935027,2 +6836.766090677114,3 +6840.917247394207,1 +6298.148697050608,4 +6686.172673655941,5 +4973.761784622589,7 +6745.003458801378,3 +6756.387784467746,6 +6840.273791681527,4 +6869.706795208637,1 +6814.6400018967815,5 +6694.4897056840655,3 +6313.147217789797,4 +7007.673611377591,4 +6270.739842384584,2 +7173.100207327832,6 +4767.7758461881795,7 +6569.587077219348,5 +6817.091171801629,5 
+6952.70162785863,1 +7133.711986076643,1 +6882.550248776022,5 +7033.158577774701,1 +6683.759518144136,5 +6482.568711995341,3 +6846.680004599786,6 +6510.620108985187,5 +6752.315554748924,6 +6547.9202963038315,5 +6786.909600688153,1 +4967.443490404249,0 +6641.204407536967,5 +6569.364773256428,3 +6918.8563454273635,3 +6791.360449667722,6 +4825.932424295655,7 +6414.66691982778,4 +6907.699762286447,4 +6773.067856712133,1 +7006.695288715602,1 +6666.775214420044,3 +6803.943435505373,6 +6253.858701431852,5 +6901.139944794222,6 +6674.149194679771,1 +6540.14414467443,3 +4712.025716649294,0 +4917.987912785473,0 +6702.993794845398,2 +4723.844042152061,7 +6854.835933197556,1 +6344.577861985344,2 +6886.4459493475215,6 +7287.0347977983565,6 +7448.104510205641,6 +6602.066986432192,4 +4712.104049868485,7 +6838.877423096601,6 +6574.549786166743,5 +6534.507667739161,3 +6849.274855277696,3 +6383.659600042243,4 +6699.718614777707,1 +6480.476444911588,2 +6736.785344704496,3 +6498.968024812076,4 +6509.353878303233,2 +4646.874313512967,0 +6686.049410823534,4 +7027.767673724881,6 +6848.824455906077,4 +6666.278650371969,2 +6625.850670529142,4 +6879.471731581919,6 +6977.7375913646365,6 +7100.626545938007,6 +6430.402305899699,3 +6460.255107474174,5 +4724.617755010336,0 +6827.3833596170525,1 +4927.894554233038,0 +6772.211428774723,2 +6912.385705832085,6 +6851.310303986916,2 +6253.915974415101,5 +6617.762918876424,4 +6488.526044766867,2 +6169.6472063987185,5 +7213.218357924382,1 +6867.748815644327,5 +6918.33158685993,5 +6791.462025221129,4 +7012.330212850029,6 +6532.385092284261,2 +6341.0649869652525,4 +7071.67625594787,6 +6555.754416331552,1 +6638.048758592245,3 +6720.160451729105,3 +6523.617552923813,5 +6896.766465778542,1 +6469.2223975548095,2 +6795.408503132324,2 +6711.30815213748,3 +6809.734262786927,6 +6488.112791956408,5 +6421.927511143858,2 +6670.267882538749,2 +6353.812272340946,5 +6773.066482558616,6 +6950.169798355646,4 +6322.0438894388335,2 +6740.778667992463,3 +7181.179908813653,1 
+6707.744961398099,2 +6670.087823798758,3 +6725.416217740019,2 +7118.079107847797,6 +6596.578183970384,2 +6195.201143486743,5 +6556.333495017416,3 +7162.009166951452,1 +6718.796469049854,4 +7055.791369991619,6 +6700.27576102535,2 +6857.148639439893,1 +6762.858186744903,3 +6341.054477667647,4 +6422.040720466387,3 +6911.039009158492,1 +4761.117914525239,0 +6498.605949279907,3 +6496.963753915143,5 +6939.728445397315,6 +6307.254286934067,3 +6686.648720419727,6 +4790.016182862088,7 +6810.102614714142,6 +7013.384287467183,6 +6383.264450367278,3 +6477.906699276913,4 +6906.469779413795,1 +6461.519370159827,4 +6429.494649467426,2 +6965.61635226687,6 +6767.8597870046215,2 +6504.313892619573,5 +4842.288701265012,7 +6990.837731334252,6 +6581.799665800612,1 +6279.792895306004,2 +7070.817723115797,6 +6625.534843455987,2 +6783.069134751566,6 +7178.867783787074,6 +7222.1686983174495,6 +6768.866952938424,1 +6605.526289924022,3 +7194.593908814582,1 +7084.3281595078115,1 +6944.5712452997,1 +6617.962270096844,6 +6562.159508352914,1 +7332.316747942276,1 +6684.483719660237,3 +6687.299786942702,3 +6528.3639626502145,5 +6893.661817900852,6 +6878.030619170293,4 +6766.723397207286,5 +7111.555638704797,1 +6452.388827271816,4 +6895.939163147777,1 +6689.492940515687,6 +7272.507055475389,6 +7003.533628983656,6 +7027.822589263015,6 +6239.670182664895,3 +6849.9246998682875,6 +6509.435910980421,3 +7215.792345467358,1 +6729.240612503179,6 +6949.756042803154,2 +6941.806139477213,2 +6725.298763051771,1 +6501.4793962907315,6 +6841.195098810346,4 +6667.875241127266,1 +6637.763064165907,6 +4781.890038349574,7 +6727.656845343796,6 +6698.1404391099295,1 +6327.117157844632,4 +6397.364553780272,2 +6391.289486291149,5 +6929.046221830446,6 +6668.629600696243,2 +6659.772302287882,1 +7024.557120878943,1 +6542.431040703648,6 +6662.197336385083,2 +6860.217378735422,6 +7165.897028085659,6 +6421.0144446801405,3 +6715.302411932873,6 +7179.501657489152,6 +7107.341826671578,6 +7133.999263130792,6 +6822.202656066354,4 
+6658.311026989245,1 +6679.171766188013,6 +6432.72186618061,3 +7066.167176633809,1 +6593.807674822686,6 +6528.963805530141,5 +6419.536372999682,2 +6775.571847957824,1 +6415.095021942045,2 +6543.103974523736,4 +6684.675059779556,4 +6518.2637343481365,5 +6902.615327602836,5 +4806.297150338174,7 +6882.032903261584,6 +7197.138200766443,6 +6895.411922079926,6 +4848.798493163935,0 +6594.780974361671,5 +6990.669944398589,1 +6707.247231013151,6 +6798.947530915439,1 +6593.648164088919,4 +6450.4936097171585,2 +7028.673331827835,6 +6502.474695592332,5 +6908.648349099276,5 +6776.715614595779,6 +6754.416114628619,4 +6352.549111223198,3 +6793.424792883569,3 +6847.2128819061545,1 +6850.115980357173,5 +6728.887055493863,1 +6512.324304082998,2 +6921.897375256663,6 +6574.983612854163,2 +6765.911746473777,1 +7204.719434491789,1 +6793.701933445064,5 +4747.834434240344,0 +6904.099464949432,1 +6556.498650649859,4 +6487.452898028252,4 +7091.137623194211,6 +6558.203083947783,4 +7052.267436328557,6 +6451.585204603681,3 +6663.051703433664,6 +7200.181161911677,6 +6467.772751668658,1 +6942.784880897236,1 +4980.041093171701,7 +6889.707728421912,6 +7032.429135546456,6 +6642.888844898102,1 +6573.096381704346,2 +6364.6574401626885,5 +7111.534007576396,1 +6368.1754960731205,2 +6711.470438285173,1 +7106.245506009739,1 +6731.085262346296,6 +6659.591905604654,1 +6446.582669295999,6 +6546.195659912682,5 +6970.17923840936,1 +6557.912850896749,3 +6526.928095403732,2 +6430.677534939066,5 +6599.8329935525135,3 +4811.682056402591,7 +6593.602107468692,3 +6464.431877898666,5 +7160.100534378295,1 +6524.230119418091,4 +4776.751814909581,7 +6480.122426673419,4 +6840.609977252182,1 +6587.744353181637,4 +6454.811005150963,3 +6828.409290187323,1 +6817.769825633437,6 +6913.531015191991,1 +6675.94938140968,6 +6718.061335340755,1 +6655.35597536684,3 +7035.631405915873,1 +6526.989352728323,5 +4710.69414732385,0 +6529.030923763831,5 +6934.088731098336,1 +6582.898814319858,2 +6575.160590752268,4 +6818.052868696928,6 
+6690.434919614663,1 +6514.644421178833,3 +6427.053256419319,4 +6882.259834612045,1 +7239.7910149774,1 +6867.784162074432,6 +6678.50613881392,1 +6539.781586795751,6 +6576.242517202892,1 +7051.70380056868,1 +6465.337745386526,6 +6868.268358935833,1 +6455.535408774737,5 +6444.123665003491,3 +6544.861092721355,4 +6765.947426427194,1 +6966.006408960935,1 +6962.193114784721,1 +6622.514590624693,6 +6928.792413189897,2 +6431.478778548237,5 +6806.407966501839,6 +7643.492744348258,6 +4677.306232233379,7 +6791.046940705279,3 +6624.141455698888,2 +6428.530571765004,4 +6757.053869009808,1 +7035.788266830386,1 +6741.610473216706,3 +7045.320470653778,1 +6835.681560324988,1 +6753.142802151055,6 +6977.611196456271,6 +7059.836955197423,6 +6564.869634288098,5 +6681.662043091828,4 +7033.283563312336,6 +7013.053319412538,6 +7050.331758559631,6 +7245.475931021296,6 +7076.565597959222,6 +6853.255376002945,1 +6644.535513538003,6 +6734.9263998093475,2 +6643.2931863498,3 +6980.523046569875,1 +6464.550890823937,2 +6402.559795335019,4 +6987.894292552613,1 +7059.717493749915,1 +7278.277375213479,1 +6608.627680862051,4 +6802.36804876291,1 +6587.821800881063,6 +7125.984587899962,6 +4745.515823028452,7 +6858.328887472049,6 +6893.575138086557,4 +6665.308495469184,4 +6719.42988581498,5 +7043.72525837771,1 +7238.4531873183205,1 +7209.206616183026,1 +6608.327048924957,6 +7008.178989242018,6 +6564.509534498061,5 +6702.703714473036,6 +6754.818150164723,1 +7214.136425814089,1 +6355.983154752968,4 +6902.682794982834,6 +6628.443863081731,2 +6574.894316608949,3 +7035.541357597745,1 +6334.818633856557,4 +6807.387538335297,6 +6584.044006248558,4 +6919.844152715309,6 +6766.371071056383,1 +7129.312848324977,1 +6634.373763850145,2 +6811.034759465902,1 +6755.087650624575,6 +6726.604380837529,1 +7026.057053336324,1 +6523.8427754525055,4 +6857.279157407817,1 +6442.815741652059,2 +6821.726188392531,1 +6973.86820068243,6 +7008.076682668492,6 +6588.598818763234,3 +6527.995203995283,6 +7248.367751962529,6 
+6765.812846446923,1 +6784.960951544089,6 +6953.595867495968,6 +6944.675543394124,2 +6913.9850980575675,6 +6800.012189642919,1 +6711.608809817665,6 +7148.715113914629,6 +7099.702549175803,6 +6698.474024531293,1 +6396.5480773861855,2 +6969.298966019816,1 +6806.4208527139335,6 +6470.702392681569,1 +6551.873466175245,2 +6829.102089800798,1 +6653.580693272241,5 +6670.174526681627,5 +6665.159605087423,4 +6982.804712671146,1 +6762.1702255702485,6 +6646.6281468479565,1 +6531.10098705186,4 +6912.33452071038,6 +6827.723822586028,1 +6514.459256077422,3 +6804.694557511722,6 +6538.1972389417315,5 +7078.714392264243,1 +6468.1038869036975,3 +6510.856297270849,5 +7042.896894433917,6 +6740.755868771673,1 +7026.316489783123,1 +6534.162053666783,3 +6854.86054740816,3 +6544.526613796688,5 +6703.380721720078,6 +6502.433236809886,5 +6538.646807655236,2 +7202.076305387247,6 +6601.220206279881,3 +6942.596652143229,3 +6749.062170140904,6 +6610.0065784428625,3 +7088.1981238402095,1 +6882.19740006135,6 +6748.611892041983,1 +7151.277985183124,1 +6750.537162026599,6 +6270.372443612368,3 +4734.073117247083,0 +6950.083885868064,6 +6487.972532439468,2 +6726.094144703613,1 +7153.432688313429,1 +7009.062327935331,1 +6644.383427337385,3 +6962.504954900626,1 +6621.402388667736,4 +6782.397440415967,1 +6647.030404126796,2 +7224.929138472891,6 +6608.518417814554,1 +6368.706708669998,4 +7012.836103729059,1 +6485.144427367362,6 +6752.058976667136,1 +6606.183180645693,6 +7017.752429853453,6 +6696.094400098447,1 +7038.997622049156,1 +6577.728656066365,5 +6877.305882789777,1 +7277.567286952831,1 +6788.797395065522,6 +6669.788263900459,4 +6799.568733077409,6 +6643.477064526698,3 +7037.070967687524,1 +7087.510138071448,1 +7086.308804314245,1 +6957.488935966199,1 +7007.6894316534745,1 +6525.626292907609,4 +6612.831668178769,1 +6933.655642269927,1 +6920.998619909258,6 +6636.679662602487,1 +7281.8373673025235,1 +7060.657527782616,1 +7013.4229370844405,1 +6678.072336008778,6 +7090.79108927447,6 
+6765.330464690632,1 +7124.388784096479,1 +7309.896195431899,1 +6975.561290369494,1 +7096.686670133886,1 +7135.182190794343,1 +6696.105501865279,6 +6810.678504264907,1 +6889.510856263004,3 +6986.56561462748,1 +7216.009553506323,1 +7037.004015988211,1 +6630.461244044405,3 +6977.282345635189,1 +6835.583289146264,1 +6574.869556973135,6 +6455.298954695393,5 +6605.092298335918,6 +6473.653164276389,1 +6605.102314129782,2 +6854.679568634519,2 +6745.004961163578,1 +6984.301201074132,1 +6763.836577430097,6 +6690.17751752981,1 +7135.665748219999,1 +6979.705714431099,6 +6779.69127501203,1 +6895.832413858248,6 +6382.806696231731,3 +7093.77921258303,1 +7189.613027617213,1 +7038.140532930157,1 +7258.469434886443,1 +7146.205216451886,1 +6737.4106626989105,5 +6868.373698733518,6 +6616.102734416038,1 +6766.417740055118,6 +6682.922092472732,1 +6661.938477737485,6 +6746.69296662486,1 +6984.553859314039,1 +6925.917294390932,1 +7182.686652121508,1 +6556.359866228295,5 +6712.05309539846,1 +6891.349641876419,6 +6825.779149741264,1 +6905.440530981521,1 +6501.373282310099,3 +7270.404885630591,1 +7122.650222358338,1 +7065.077341870995,1 +6721.869044483297,6 +6312.156727512685,5 +7000.358466180085,1 +7001.68060046645,1 +6712.770333473434,6 +6673.378916251139,1 +6718.979772815572,3 +6353.474753789673,4 +6520.757437091273,2 +6280.84418141667,5 +7118.130312511458,1 +6804.089788182993,1 +6896.00206853211,6 +6656.554983868729,1 +6569.742442466311,5 +6354.706272908872,2 +6599.002853506478,2 +6655.8473281137685,3 +7106.845020761222,1 +7062.34085849223,1 +6493.204438708931,5 +7010.013643840379,1 +7238.164389292178,1 +6919.784676563293,1 +7181.703936595872,1 +7203.309412995769,1 +7094.201994113319,1 +6524.27002598332,2 +6744.093238771813,1 +6990.89973060369,1 +7090.952055981292,1 +7085.841254597783,1 +7167.307259605002,1 +7010.279876678065,1 +7108.328572886337,1 +6940.691360105309,1 +6643.084062908825,6 +4753.294566062028,0 +6954.561450792108,1 +6686.603860086715,6 +7220.43175793978,6 
+6727.403436031007,1 +7144.999933137424,1 +6865.544516450215,6 +6547.911368916412,1 +6838.503643614076,6 +6535.911956660459,3 diff --git a/openAI_RRM/data_20190419/thomsonagent_22_1.csv b/openAI_RRM/data_20190419/thomsonagent_22_1.csv new file mode 100644 index 0000000..67b6155 --- /dev/null +++ b/openAI_RRM/data_20190419/thomsonagent_22_1.csv @@ -0,0 +1,357 @@ +4691.821378295467,3 +4747.781868281091,0 +6810.2060767546245,2 +7039.156893176712,2 +4830.162966468673,3 +6736.1631786527,2 +6621.285074378641,1 +6961.169519539971,1 +7101.783200719381,1 +6547.770972246577,2 +4826.413908898835,3 +6995.178933799551,1 +6661.791828381669,2 +4818.054599131663,0 +6808.930562305568,1 +4772.487320072658,0 +6844.673423714632,2 +6526.7480620809965,1 +6926.971545463797,1 +7223.426363076649,1 +4782.120118249073,3 +6732.432366466177,1 +4945.280877215412,0 +6780.284699023344,2 +7054.588603967637,2 +6524.288752037295,1 +4827.098719244828,3 +6968.374231575206,1 +6948.2172472770235,1 +7130.853332289742,1 +7185.099541337189,1 +4795.221234581697,0 +6937.902576969467,1 +6747.162291354672,2 +7203.684011405461,2 +6591.502687276999,1 +7150.690280638164,1 +6629.254116124082,2 +6980.397785054955,2 +6854.470705549975,2 +4874.580806834957,3 +4549.662010995433,0 +6934.550553263904,1 +6878.637453606209,1 +6997.222642650669,1 +7051.5748606346915,1 +6692.416320091207,2 +6997.508141314875,2 +6729.368849391063,1 +6501.6933641289415,2 +7038.98911894661,2 +7160.71242640548,2 +6548.5090802738705,1 +7067.621101058454,1 +6907.693836839659,1 +4834.340496600629,0 +6821.898342377015,1 +4840.792183226366,3 +4512.869859309449,0 +7022.5136386021695,1 +7032.786118495609,1 +6914.1831350808525,1 +6501.751425537415,2 +7062.235441886667,2 +7323.957079287262,2 +6607.9519179381105,1 +6398.185329153932,2 +7114.957643358861,2 +6627.13641568039,1 +6678.9999908459695,2 +7159.997717239907,2 +7016.395617397477,2 +4856.943465878148,3 +6727.844238526697,2 +6590.862737134234,1 +6560.948575586983,2 +6942.69215828935,2 
+6612.178786839411,1 +7173.488170719587,1 +6592.444334135262,2 +6944.441798263471,2 +7224.780696672596,2 +6945.938735897036,2 +6550.4469482043005,1 +6657.691951180514,2 +7151.070536389016,2 +6540.913186670945,1 +6673.955569202702,2 +6604.220267472139,1 +6706.576797549426,2 +6321.703335570578,1 +6622.513259189511,2 +6517.32043941299,1 +7091.774990877828,1 +6561.496328662608,2 +6986.510416320301,2 +6498.260594242665,1 +7039.465072851414,1 +6972.862073848125,1 +6614.340379701002,2 +6481.685939418008,1 +6488.285975576652,2 +7052.235317721339,2 +7145.689039627486,2 +7066.854677876267,2 +4854.523116224001,0 +6560.62442949931,1 +6927.93555980382,1 +6557.960507764279,2 +6572.370459188744,1 +4763.159041763653,0 +6944.48319086769,1 +7255.710893681628,1 +4921.49892626791,0 +7006.53001602364,1 +6371.427138956726,2 +7154.024503480147,2 +6714.136902797468,1 +6637.6811241391715,2 +6593.462202978282,1 +6603.482305204965,2 +7036.667953357609,2 +4785.083875954887,3 +6861.061411826496,1 +6546.701789116396,2 +6789.095683365303,1 +7188.915514573735,1 +7175.342409938945,1 +4666.734401987723,3 +5021.315173393105,3 +6586.4952151873385,2 +7083.9392042188965,2 +6862.459884353468,2 +6716.824784641077,1 +6620.34051370578,2 +6528.92477648543,1 +6511.046832882697,2 +6760.240105898853,1 +6554.197358390977,2 +6660.982736353355,1 +6890.159403797978,1 +6425.44951092657,2 +4903.001999330992,3 +6844.683458460024,2 +6936.6678481833715,2 +6998.42416012928,2 +7077.3879441923755,2 +7034.938554189472,2 +7097.288341481848,2 +6638.207099858215,1 +7206.595874027358,1 +6569.580073162164,2 +6593.935699010321,1 +7305.82092972559,1 +6609.776530784373,2 +7172.817743197306,2 +7293.308871483507,2 +7190.329098661321,2 +6486.494935291109,1 +6674.57087991865,2 +7196.381986518582,2 +7075.09789970231,2 +7029.909756809148,2 +7235.312859217771,2 +7205.213428596774,2 +6928.025037643271,2 +6660.165219070677,1 +6704.164787541989,2 +7129.090494226133,2 +7121.598534156548,2 +6608.335644625997,1 +6560.789003518012,2 
+6914.928157856668,2 +6561.320295884463,1 +7122.295478528242,1 +6435.995915857704,2 +7063.364602001561,2 +7125.697143287129,2 +7030.455829908954,2 +7036.7300966861885,2 +6931.157715272526,2 +6770.914368195778,1 +4879.579490044641,0 +6713.511344825237,1 +6883.01289405049,1 +6493.640150587758,2 +7205.999618053382,2 +7150.202277333244,2 +7016.497455758246,2 +6537.783053935472,1 +6581.542212872458,2 +6632.877681526956,1 +6600.952860039681,2 +7019.692235436036,2 +4815.886498133065,3 +6863.858806267042,2 +7082.082164465789,2 +6986.349443285333,2 +7129.4332491310715,2 +6925.937816826795,2 +6978.687347729595,2 +6792.221795494711,2 +7165.078744093788,2 +7038.252790586417,2 +6607.54953141654,1 +7116.021701876727,1 +6640.8227091905555,2 +6512.640630677112,1 +6653.316334501748,2 +6927.471661836795,2 +7036.681811649009,2 +6591.733289440957,1 +7020.875341238627,1 +6856.627085162561,1 +6703.722322250835,2 +7063.718627707796,2 +7007.008419527444,2 +7153.7122844750975,2 +6860.900011379291,2 +7033.895003969097,2 +6557.221679683792,1 +6574.748898327179,2 +6572.408628261485,1 +6984.372613901677,1 +6558.9188755447385,2 +6943.768009209908,2 +6560.190574653687,1 +6514.417182670696,2 +6955.563401923424,2 +6818.6528607667315,2 +7065.434240387544,2 +4773.6889776222,3 +6875.60738523623,1 +6635.833574394149,2 +6776.7699312155355,1 +7206.446723287897,1 +6588.572470652449,2 +7055.703595185254,2 +7064.332937353028,2 +6558.393362231684,1 +7023.411201195939,1 +6681.006883499904,2 +6957.085651008152,2 +7010.8995601015995,2 +7137.2185588555385,2 +6981.576214315369,2 +7174.75112198281,2 +7010.073393016178,2 +6557.497914976642,1 +7120.249858161738,1 +7025.746763402194,1 +6427.927286669994,2 +7167.130521794266,2 +7078.491907453559,2 +6654.650929985208,1 +6740.942139544376,2 +7106.867644058926,2 +6967.119861490053,2 +7096.609802666844,2 +7089.391190278775,2 +6576.771598851629,1 +6905.097814695343,1 +6597.984333278693,2 +6969.493803030655,2 +7064.1554462878175,2 +4854.494118390365,3 +6893.2204666676025,2 
+7112.382943674751,2 +7122.95822828163,2 +7093.190398455739,2 +6890.383579778461,2 +7041.510612836343,2 +7098.174643653686,2 +7018.589908395769,2 +7292.864919010752,2 +7027.194784472649,2 +6976.016140966211,2 +6551.76211582348,1 +6615.035337998663,2 +6532.826245154738,1 +7217.916274213911,1 +6477.8260020750795,2 +7094.918111251257,2 +7075.583174225198,2 +4820.735713299038,0 +6941.901220321613,1 +7111.6681272595415,1 +6601.2606840605295,2 +6503.002300922998,1 +6588.298716597173,2 +7074.982596462326,2 +7074.729830791934,2 +6936.167807644136,2 +6841.853773067742,2 +6882.5928794707725,2 +7005.326032307725,2 +6636.47053046202,1 +6601.380455818208,2 +6960.6525917365125,2 +4808.967534758443,3 +6848.405625038253,2 +7034.016884194765,2 +7160.606536783185,2 +6614.785725339969,1 +6842.93824051167,1 +7063.967601032025,1 +6918.119692751169,1 +6798.520173646348,2 +7149.599855318848,2 +7035.434663748853,2 +7014.866795232548,2 +7016.091830894513,2 +7112.310457804022,2 +7208.711391254497,2 +7147.662707978803,2 +7109.300818489204,2 +7156.013790169071,2 +6464.298862795397,1 +6949.92394634094,1 +6582.769260201776,2 +7037.388170087898,2 +6970.943838333824,2 +6904.320530282486,2 +6791.045554624641,2 +7098.993115860696,2 +6959.887812219937,2 +6935.358909391219,2 +7165.778448692931,2 +6972.881387005233,2 +6879.141976966021,2 +7190.836891885245,2 +7029.698815322232,2 +7278.303982379205,2 +6878.8290471481105,2 +6689.37736287149,1 +6666.421194006113,2 +6770.273590733532,2 +7057.731230184832,2 +7252.132052853777,2 +6984.334459252127,2 +7039.26360829572,2 +7123.446773473285,2 +7013.95815944945,2 +6651.929259800371,1 +6813.0960353108485,2 +7007.983572361958,2 +7116.839044085536,2 +6873.254245475726,2 +7100.7735615513675,2 +7015.5154417313015,2 +7195.835062829428,2 +6941.747979328004,2 +7299.395092183973,2 +6998.865054803824,2 +7025.500533767707,2 +7261.513050565645,2 +4958.5172916365345,3 diff --git a/openAI_RRM/data_20190419/thomsonagent_3222.csv 
b/openAI_RRM/data_20190419/thomsonagent_3222.csv new file mode 100644 index 0000000..67bf16a --- /dev/null +++ b/openAI_RRM/data_20190419/thomsonagent_3222.csv @@ -0,0 +1,376 @@ +6749.705702782545,0 +8191.53045713126,6 +8006.39384871145,3 +8243.270777417089,6 +8417.433311754394,4 +10125.486164909367,2 +6977.537563040518,0 +7061.910809287144,0 +8473.39269458296,4 +10186.26855501716,2 +9923.922440576964,5 +10147.018086858428,2 +6916.103071375785,0 +8213.634187912854,3 +6943.583442967419,7 +6975.167061449313,7 +8360.012717024747,4 +6725.669314201694,7 +8155.6161497775,4 +10406.804829288541,5 +10850.121474682928,5 +6720.568134691852,0 +8422.725461874463,6 +8075.626087790713,1 +8421.446358507485,1 +8579.895806128845,1 +8790.715788349458,1 +10186.92415543578,5 +8054.1202515729965,6 +8035.491142443709,1 +10532.435593279084,5 +8387.31057986762,4 +8475.50200747002,6 +8527.248250176808,4 +10812.632738834405,5 +9689.50375673644,2 +8449.509609343711,3 +8473.687495214703,3 +7125.152845414024,7 +9941.851522604313,2 +9884.110851010752,5 +8170.804704468574,4 +8564.51721198768,4 +10445.158759256843,2 +7005.186784723082,7 +8231.09548517976,4 +8069.438448339008,1 +8320.911543375772,3 +7956.330750447252,4 +8265.121751526112,1 +8002.9622516350955,6 +8271.146614122747,3 +6956.525448959227,7 +7034.503573184953,7 +8251.881628404439,4 +8472.158121226625,6 +10498.747133126764,2 +10060.733795487324,5 +10930.72229240521,5 +6734.25712864296,0 +10443.13236133557,5 +9982.196774955952,2 +10658.05075221081,2 +9882.703878203998,5 +6750.232707306854,0 +10052.753943820357,2 +10682.147263258525,2 +9843.638240984968,5 +10999.129405187598,5 +8315.7652270125,3 +10421.276363444995,2 +8282.420216006925,3 +8091.040166121201,6 +10448.834377862486,2 +6785.280170288703,7 +8202.701440846955,1 +8411.925454711463,3 +6850.307857129235,7 +8384.64936644053,6 +8169.665160400943,3 +8660.98499099975,3 +10183.417763870884,5 +10709.806736186472,5 +10553.165309272455,5 +6689.665744220265,0 +10073.899855289566,5 
+9900.052506898373,2 +10148.150980277882,5 +10765.36132639973,5 +10491.865817342314,5 +10095.864389668963,2 +9882.847093054042,5 +10079.125564103011,2 +8212.500414746279,1 +10167.025335597165,2 +10037.316842374204,5 +10788.748475658307,5 +8261.371086580933,6 +10214.014764612433,5 +10256.316682740162,2 +8462.89004335393,6 +6773.696683465912,7 +10411.667056950915,5 +9882.422424592654,2 +10714.039030283533,2 +8502.881288015444,6 +7935.699062134904,1 +8241.33398954639,4 +8575.18628956531,4 +8718.043977237941,4 +9977.483849995395,2 +10071.188295499782,5 +8316.066940801362,1 +10228.525750941979,5 +10402.445818752143,5 +9861.550340127387,2 +8000.06386824836,4 +10403.038177070419,2 +9821.066496720427,5 +10801.668115150635,5 +8146.647012635448,6 +10621.58493773823,2 +10106.845454243576,5 +10570.911125711922,5 +8324.08480213858,4 +8574.818686929328,4 +9884.08413425567,2 +10700.596661953588,2 +8244.581635117622,6 +10433.866775673545,2 +10432.69081403543,2 +10007.37528327169,5 +8287.927638707946,3 +10418.51748653757,5 +8491.336879390203,1 +10405.560064797417,5 +6877.185453385623,0 +8131.531060365645,6 +10305.916175334856,2 +10757.542026245786,2 +10732.277699913004,2 +10876.366920748056,2 +9872.313068161206,5 +10765.944586451196,5 +8341.8089866868,1 +10003.453020131221,2 +10036.197672940794,5 +10773.934496741964,5 +8300.467772314256,4 +10087.187121580428,2 +10794.918756444464,2 +9874.897239099146,5 +10156.892694680919,2 +8213.388105648775,1 +10341.837187197236,5 +8360.871862582393,3 +10436.45802890982,5 +8171.659572046002,3 +8421.747913228599,1 +10263.516331356604,5 +6898.034712156686,7 +10291.492755156547,2 +9978.02172052964,5 +9979.081602122726,2 +10121.154613908295,5 +10612.937430459402,5 +10676.749947049564,5 +10898.624886934726,5 +10804.345096338742,5 +6974.1580685553145,7 +10334.482602343114,5 +10088.098102592761,2 +10581.956034335864,2 +10815.268319807898,2 +10440.292401569108,2 +8497.231841088267,6 +10228.605549002987,5 +9743.532439312092,2 +10962.000956113334,2 
+10043.814937230303,5 +10630.814921082252,5 +6885.111216820775,7 +8085.907984777696,1 +10052.727089271035,2 +10043.31907466746,5 +10037.462579299598,2 +10720.221823825357,2 +10073.260856171326,5 +6744.876483091984,0 +10216.86824232489,5 +10676.988226388738,5 +9733.020749547883,2 +8307.335193491255,3 +10311.367199879365,5 +9900.338007061357,2 +10817.825843309702,2 +8311.569847005067,6 +10271.877793682917,2 +8109.4920354820515,1 +10110.38090250417,2 +9849.078782001736,5 +8490.13808531046,1 +10177.621173240477,2 +8383.41312425552,3 +8317.484909703748,3 +10440.593318239258,2 +10655.614748223634,2 +10176.35263900949,5 +8326.677277740975,6 +10042.246560288695,5 +10675.931893914945,5 +9871.333447665602,2 +9739.094775424146,5 +9820.224731553462,2 +10476.739915673345,2 +10524.382730663481,2 +10590.624660022402,2 +9870.03007759726,5 +10241.678611154874,2 +6852.052363515989,0 +10408.47489250419,2 +10534.563528640836,2 +10117.410260816512,5 +10836.533990193986,5 +8261.21391907135,3 +10535.334425601552,2 +10643.829159534565,2 +10024.720712417942,5 +8411.992974277966,1 +10274.805757377713,5 +10066.965464954113,2 +10774.776845564014,2 +10056.433028286376,5 +9994.108544353183,2 +8257.238090131588,1 +10420.213264179656,2 +10931.226584777745,2 +6710.692724221412,7 +8060.3724471447995,4 +10543.565986135978,5 +8174.398258893029,3 +10373.268025558373,5 +9897.262504432783,2 +10492.669982805764,2 +10249.31380632124,5 +8561.025204556541,4 +10317.199379940312,5 +10077.152205303973,2 +9990.547445918222,5 +10161.622011008358,2 +10640.188576093939,2 +6808.006371130064,7 +10167.611798333046,2 +9997.445643984922,5 +10067.082874002388,2 +9985.921910653597,5 +9917.452250316106,2 +9929.307615594533,5 +6882.670426919021,7 +10226.402990855597,2 +8416.962764266525,1 +10288.477793025886,2 +10632.656527959163,2 +8421.848211349155,6 +10299.816853725728,2 +10747.674493905433,2 +10831.67378272185,2 +9784.309103329826,5 +9884.743984126591,2 +10572.206803301407,2 +10007.816862799704,5 +10792.043258649923,5 
+10622.588378257102,5 +8212.973560800894,3 +10624.581826653564,2 +9994.456189480545,5 +10061.687371438567,2 +10677.315025762142,2 +8234.561901877754,6 +10324.856060396329,5 +9952.491932853058,2 +10115.060194753967,5 +9934.710977912879,2 +10585.406354169103,2 +10721.73322046106,2 +7034.713737231365,0 +10485.22561843986,5 +10191.96710826229,2 +10032.551097773714,5 +9955.163143768672,2 +10676.954330279434,2 +10675.584892289471,2 +10039.087188344825,5 +10617.388113955829,5 +8101.860343602208,4 +10216.662559711971,2 +8410.343579503766,1 +10139.486513672802,2 +9732.20256258494,5 +10847.189836255726,5 +10716.412254361407,5 +10765.643667142316,5 +10809.919691125706,5 +10618.013795012645,5 +10351.9081328994,5 +10569.886710176965,5 +10230.915190705004,2 +10618.329193033018,2 +9997.736733484067,5 +10889.018467828526,5 +10483.522726905367,5 +10741.35176063109,5 +10725.751820216812,5 +10587.512562531314,5 +10609.074911214364,5 +10633.355555366164,5 +10940.288040214602,5 +10114.347717194669,2 +9936.005900225598,5 +10743.195740329284,5 +10205.415105269809,2 +10896.248555815648,2 +10020.433022182107,5 +10077.114077932469,2 +8312.769394555939,3 +10613.19486690755,2 +10498.119777573058,2 +9817.019508058725,5 +10695.887464930116,5 +10099.514621335422,2 +10508.408392967644,2 +9791.61691792251,5 +10661.56850418523,5 +9831.289939659788,2 +10101.483830179672,5 +10113.229075131458,2 +10205.624439497225,5 +10106.656905115113,2 +10053.085226470432,5 +10794.491721810704,5 +10750.337832540914,5 +10284.781500801862,2 +6976.629907750449,0 +10412.065345820252,2 +10733.388908705077,2 +10028.785663508062,5 +10076.197712976787,2 +10494.706241489588,2 +10003.768303432984,5 +10711.733960320216,5 +9883.076557993676,2 +10773.58485296789,2 +10594.070875233161,2 +10610.918422429952,2 +9868.267383635033,5 +10420.463710512946,2 +10261.072965869002,5 +9902.523731152582,2 +9892.525849670867,5 +10158.632848889032,2 +10805.328814455557,2 +10157.35195878943,5 +10393.808104443066,5 +6901.914875067307,0 
+10663.256353066776,2 +10468.606427677329,2 +10491.591222765916,2 +10782.093687452849,2 +10614.242471784924,2 +10855.247727235908,2 +10313.197220898332,2 +9774.67610551284,5 +10290.19502768858,2 +10644.418356164255,2 +10185.932984031024,5 +10285.08333562797,2 From 5b844c57d18043c6fbf2b3ebd8bb540d722dc1a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Thu, 25 Apr 2019 18:46:09 +0200 Subject: [PATCH 23/54] disable plot --- openAI_RRM/rrm_agent.py | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index 969710c..9f47271 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -11,13 +11,13 @@ import logging import time import csv -import matplotlib.pyplot as plt from math import * parser = argparse.ArgumentParser(description='Uniflex reader') parser.add_argument('--config', help='path to the uniflex config file', default=None) parser.add_argument('--output', help='path to a csv file for agent output data', default=None) +parser.add_argument('--plot', help='activate plotting', default=None) args = parser.parse_args() if not args.config: print("No config file specified!") @@ -25,6 +25,9 @@ if not args.output: print("No output file specified! 
- Skip data") +if args.plot + import matplotlib.pyplot as plt + #create uniflex environment, steptime is 10sec env = gym.make('uniflex-v0') #env.configure() @@ -133,16 +136,17 @@ print ("Channel selection:" + str(action)) print ("next step") - plt.subplot(211) - plt.plot(run, reward, 'bo') # Additional point - plt.ylabel('reward') - plt.subplot(212) - #for ap in range(0, aps): - # plt.plot(actions[ap]) - plt.plot(run, action, 'bo') # Additional point - plt.ylabel('action') - plt.xlabel('step') - plt.pause(0.05) + if args.plot: + plt.subplot(211) + plt.plot(run, reward, 'bo') # Additional point + plt.ylabel('reward') + plt.subplot(212) + #for ap in range(0, aps): + # plt.plot(actions[ap]) + plt.plot(run, action, 'bo') # Additional point + plt.ylabel('action') + plt.xlabel('step') + plt.pause(0.05) run += 1 From aa7829ab245b62d0f0ac4e1f0ea52c751822737b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Thu, 25 Apr 2019 18:47:57 +0200 Subject: [PATCH 24/54] disable plot --- openAI_RRM/rrm_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index 9f47271..1a559a0 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -25,7 +25,7 @@ if not args.output: print("No output file specified! 
- Skip data") -if args.plot +if args.plot: import matplotlib.pyplot as plt #create uniflex environment, steptime is 10sec From 6505b64b71b87db6b7c74631d2661100542d0d33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Mon, 29 Apr 2019 17:51:15 +0200 Subject: [PATCH 25/54] select nuc4 as controller --- openAI_RRM/config_master.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/openAI_RRM/config_master.yaml b/openAI_RRM/config_master.yaml index f95d888..c5e8f1d 100644 --- a/openAI_RRM/config_master.yaml +++ b/openAI_RRM/config_master.yaml @@ -4,12 +4,12 @@ config: name: "Global_Controller" info: 'agent hosts global controller' iface: 'lo' - sub: "tcp://127.0.0.1:8990" - pub: "tcp://127.0.0.1:8989" + sub: "tcp://192.168.10.157:8990" + pub: "tcp://192.168.10.157:8989" broker: - xpub: "tcp://127.0.0.1:8990" - xsub: "tcp://127.0.0.1:8989" + xpub: "tcp://192.168.10.157:8990" + xsub: "tcp://192.168.10.157:8989" control_applications: myController: @@ -26,6 +26,6 @@ modules: class_name : PyreDiscoveryMasterModule kwargs: {"iface":"lo", "groupName":"uniflex_1234", - "sub":"tcp://127.0.0.1:8990", - "pub":"tcp://127.0.0.1:8989" + "sub":"tcp://192.168.10.157:8990", + "pub":"tcp://192.168.10.157:8989" } From 4d1e900cd3179fe6b7fbf71557792207d2bdfbfe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Wed, 1 May 2019 14:52:52 +0200 Subject: [PATCH 26/54] fix real wifi problems --- openAI_RRM/channel_controller.py | 4 ++-- openAI_RRM/rrm_agent.py | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index e4aef2d..e73ec97 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -173,9 +173,9 @@ def set_channel(self, node_uuid, dev_uuid, ifaceName, channel_number, channel_wi if device is None: return False if channel_width is not None: - device.blocking(False).set_channel(channel_number, ifaceName, 
channel_width= channel_width) + device.blocking(False).set_channel(channel_number, ifaceName, channel_width= channel_width, control_socket_path='/var/run/hostapd') else: - device.blocking(False).set_channel(channel_number, ifaceName) + device.blocking(False).set_channel(channel_number, ifaceName, control_socket_path='/var/run/hostapd') return True ''' diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index 1a559a0..2087220 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -66,8 +66,10 @@ model.compile(optimizer=tf.train.AdamOptimizer(0.001), loss='categorical_crossentropy', metrics=['accuracy']) - - state = np.reshape(state, [1, s_size]) + try: + state = np.reshape(state, [1, s_size]) + except ValueError: + continue rewardsum = 0 done = False From dd9b5e88c4677e8234b5b0034c732d88703da2d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Wed, 1 May 2019 18:18:34 +0200 Subject: [PATCH 27/54] variable steptime --- openAI_RRM/rrm_agent.py | 4 +++- openAI_RRM/thomson_agent.py | 35 +++++++++++++++++++++-------------- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index 2087220..fae4d3f 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -18,6 +18,8 @@ parser.add_argument('--config', help='path to the uniflex config file', default=None) parser.add_argument('--output', help='path to a csv file for agent output data', default=None) parser.add_argument('--plot', help='activate plotting', default=None) +parser.add_argument('--steptime', help='interval between two steps', default=1) + args = parser.parse_args() if not args.config: print("No config file specified!") @@ -31,7 +33,7 @@ #create uniflex environment, steptime is 10sec env = gym.make('uniflex-v0') #env.configure() -env.start_controller(steptime=1, config=args.config) +env.start_controller(steptime=args.steptime, config=args.config) epsilon = 1.0 # exploration rate epsilon_min = 
0.01 diff --git a/openAI_RRM/thomson_agent.py b/openAI_RRM/thomson_agent.py index 211fe7e..ba393d0 100644 --- a/openAI_RRM/thomson_agent.py +++ b/openAI_RRM/thomson_agent.py @@ -3,21 +3,23 @@ import gym import UniFlexGym -import tensorflow as tf -import tensorflow.contrib.slim as slim +#import tensorflow as tf +#import tensorflow.contrib.slim as slim import numpy as np #from tensorflow import keras import argparse import logging import time import csv -import matplotlib.pyplot as plt from math import * parser = argparse.ArgumentParser(description='Uniflex reader') parser.add_argument('--config', help='path to the uniflex config file', default=None) parser.add_argument('--output', help='path to a csv file for agent output data', default=None) +parser.add_argument('--plot', help='activate plotting', default=None) +parser.add_argument('--steptime', help='interval between two steps', default=1) + args = parser.parse_args() if not args.config: print("No config file specified!") @@ -25,10 +27,14 @@ if not args.output: print("No output file specified! 
- Skip data") +if args.plot: + import matplotlib.pyplot as plt + + #create uniflex environment, steptime is 10sec env = gym.make('uniflex-v0') #env.configure() -env.start_controller(steptime=1, config=args.config) +env.start_controller(steptime=int(args.steptime), config=args.config) epsilon = 1.0 # exploration rate epsilon_min = 0.01 @@ -112,16 +118,17 @@ print ("Average:" + str(avg)) print ("next step") - plt.subplot(211) - plt.plot(run, reward, 'bo') # Additional point - plt.ylabel('reward') - plt.subplot(212) - #for ap in range(0, aps): - # plt.plot(actions[ap]) - plt.plot(run, action, 'bo') # Additional point - plt.ylabel('action') - plt.xlabel('step') - plt.pause(0.05) + if args.plot: + plt.subplot(211) + plt.plot(run, reward, 'bo') # Additional point + plt.ylabel('reward') + plt.subplot(212) + #for ap in range(0, aps): + # plt.plot(actions[ap]) + plt.plot(run, action, 'bo') # Additional point + plt.ylabel('action') + plt.xlabel('step') + plt.pause(0.05) run += 1 From 49f2aae474c7cb3350f29308af9bd5e5b9cac498 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Wed, 1 May 2019 18:19:29 +0200 Subject: [PATCH 28/54] rename file --- openAI_RRM/{thomson_agent.py => thompson_agent.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename openAI_RRM/{thomson_agent.py => thompson_agent.py} (100%) diff --git a/openAI_RRM/thomson_agent.py b/openAI_RRM/thompson_agent.py similarity index 100% rename from openAI_RRM/thomson_agent.py rename to openAI_RRM/thompson_agent.py From 5e4d0cc39c683e67d6cedaac5ca50c3aa8cae64d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Mon, 6 May 2019 20:16:46 +0200 Subject: [PATCH 29/54] add step argument --- openAI_RRM/rrm_agent.py | 4 ++++ openAI_RRM/thompson_agent.py | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index fae4d3f..071cfe7 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -19,6 +19,7 @@ 
parser.add_argument('--output', help='path to a csv file for agent output data', default=None) parser.add_argument('--plot', help='activate plotting', default=None) parser.add_argument('--steptime', help='interval between two steps', default=1) +parser.add_argument('--steps', help='number of steps in this execution. If not set, the agents runs infinitly long', default=None) args = parser.parse_args() if not args.config: @@ -154,6 +155,9 @@ run += 1 + if args.steps and args.steps >= run: + quit() + episode += 1 diff --git a/openAI_RRM/thompson_agent.py b/openAI_RRM/thompson_agent.py index ba393d0..b16bb51 100644 --- a/openAI_RRM/thompson_agent.py +++ b/openAI_RRM/thompson_agent.py @@ -19,6 +19,7 @@ parser.add_argument('--output', help='path to a csv file for agent output data', default=None) parser.add_argument('--plot', help='activate plotting', default=None) parser.add_argument('--steptime', help='interval between two steps', default=1) +parser.add_argument('--steps', help='number of steps in this execution. 
If not set, the agents runs infinitly long', default=None) args = parser.parse_args() if not args.config: @@ -130,6 +131,9 @@ plt.xlabel('step') plt.pause(0.05) + if args.steps and args.steps >= run: + quit() + run += 1 episode += 1 From 1a6f6991b6f789dc8e13ef620dfbb5521973e1a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Mon, 6 May 2019 20:41:39 +0200 Subject: [PATCH 30/54] debug rrm agent --- openAI_RRM/rrm_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index 071cfe7..b38e4a2 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -34,7 +34,7 @@ #create uniflex environment, steptime is 10sec env = gym.make('uniflex-v0') #env.configure() -env.start_controller(steptime=args.steptime, config=args.config) +env.start_controller(steptime=int(args.steptime), config=args.config) epsilon = 1.0 # exploration rate epsilon_min = 0.01 From 9fe8fce0717a3fb33cac8f1e38c04c08c95930fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Mon, 6 May 2019 20:47:01 +0200 Subject: [PATCH 31/54] debug rrm agent --- openAI_RRM/rrm_agent.py | 2 +- openAI_RRM/thompson_agent.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index b38e4a2..0476e4a 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -155,7 +155,7 @@ run += 1 - if args.steps and args.steps >= run: + if args.steps and int(args.steps) >= run: quit() episode += 1 diff --git a/openAI_RRM/thompson_agent.py b/openAI_RRM/thompson_agent.py index b16bb51..8deddc4 100644 --- a/openAI_RRM/thompson_agent.py +++ b/openAI_RRM/thompson_agent.py @@ -131,7 +131,7 @@ plt.xlabel('step') plt.pause(0.05) - if args.steps and args.steps >= run: + if args.steps and int(args.steps) >= run: quit() run += 1 From b68892a8ef164ca2750370d670090be491cb0228 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Mon, 6 May 
2019 20:49:35 +0200 Subject: [PATCH 32/54] debug rrm agent --- openAI_RRM/rrm_agent.py | 2 +- openAI_RRM/thompson_agent.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index 0476e4a..16651dd 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -155,7 +155,7 @@ run += 1 - if args.steps and int(args.steps) >= run: + if args.steps and int(args.steps) < run: quit() episode += 1 diff --git a/openAI_RRM/thompson_agent.py b/openAI_RRM/thompson_agent.py index 8deddc4..5922568 100644 --- a/openAI_RRM/thompson_agent.py +++ b/openAI_RRM/thompson_agent.py @@ -131,7 +131,7 @@ plt.xlabel('step') plt.pause(0.05) - if args.steps and int(args.steps) >= run: + if args.steps and int(args.steps) < run: quit() run += 1 From 896080f6ba9f2ca7b77ab8e6492d599eafbbb350 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Mon, 6 May 2019 20:51:47 +0200 Subject: [PATCH 33/54] debug rrm agent --- openAI_RRM/rrm_agent.py | 5 +++-- openAI_RRM/thompson_agent.py | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index 16651dd..681cac7 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -11,6 +11,7 @@ import logging import time import csv +import os from math import * @@ -24,7 +25,7 @@ args = parser.parse_args() if not args.config: print("No config file specified!") - quit() + os._exit(1) if not args.output: print("No output file specified! 
- Skip data") @@ -156,7 +157,7 @@ run += 1 if args.steps and int(args.steps) < run: - quit() + os._exit(1) episode += 1 diff --git a/openAI_RRM/thompson_agent.py b/openAI_RRM/thompson_agent.py index 5922568..916680b 100644 --- a/openAI_RRM/thompson_agent.py +++ b/openAI_RRM/thompson_agent.py @@ -11,6 +11,7 @@ import logging import time import csv +import os from math import * @@ -24,7 +25,7 @@ args = parser.parse_args() if not args.config: print("No config file specified!") - quit() + os._exit(1) if not args.output: print("No output file specified! - Skip data") @@ -132,7 +133,7 @@ plt.pause(0.05) if args.steps and int(args.steps) < run: - quit() + os._exit(1) run += 1 From e45b3864a619be403ce705a3c3c529482b5656d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Tue, 7 May 2019 12:05:47 +0200 Subject: [PATCH 34/54] enable float times --- openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml | 2 +- openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml | 2 +- openAI_RRM/rrm_agent.py | 2 +- openAI_RRM/thompson_agent.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml index 6207a48..190e5c1 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml @@ -24,5 +24,5 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02"], 'neighbors' : ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], 'myMAC' : 'aa:aa:aa:aa:aa:01', - 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughput':54000000, 'txBytesRandom':0}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml index 9073205..4d226be 100644 --- 
a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml @@ -24,5 +24,5 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12"], 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], 'myMAC' : 'aa:aa:aa:aa:aa:02', - 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughput':54000000, 'txBytesRandom':0}} diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index 681cac7..5946393 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -35,7 +35,7 @@ #create uniflex environment, steptime is 10sec env = gym.make('uniflex-v0') #env.configure() -env.start_controller(steptime=int(args.steptime), config=args.config) +env.start_controller(steptime=float(args.steptime), config=args.config) epsilon = 1.0 # exploration rate epsilon_min = 0.01 diff --git a/openAI_RRM/thompson_agent.py b/openAI_RRM/thompson_agent.py index 916680b..4efcba8 100644 --- a/openAI_RRM/thompson_agent.py +++ b/openAI_RRM/thompson_agent.py @@ -36,7 +36,7 @@ #create uniflex environment, steptime is 10sec env = gym.make('uniflex-v0') #env.configure() -env.start_controller(steptime=int(args.steptime), config=args.config) +env.start_controller(steptime=float(args.steptime), config=args.config) epsilon = 1.0 # exploration rate epsilon_min = 0.01 From 6962f93e4e20205b97db2fe8fe2501f1a56ecd0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Tue, 7 May 2019 12:14:45 +0200 Subject: [PATCH 35/54] log sqrt value error --- openAI_RRM/channel_controller.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index e73ec97..7d2b2d7 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -447,6 +447,10 @@ def get_reward(self): reward = 0 for key in bandwidthList: item = 
bandwidthList[key] + if item['bandwidth'] < 0: + print("Bandwidth has invalid value: " + str(item['bandwidth'])) + print(bandwidthList) + continue reward += sqrt(item['bandwidth']) return reward From 86abd7b2259d239388c9d2493004742588874858 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Sat, 11 May 2019 14:34:44 +0200 Subject: [PATCH 36/54] set broker address in ap, create unsymetric example --- .../Setting2_2/config_slave.yaml | 7 +++ .../Setting2_2/config_slave2.yaml | 7 +++ .../Setting2_2unsym/common.py | 28 ++++++++++ .../Setting2_2unsym/config_slave.yaml | 37 +++++++++++++ .../Setting2_2unsym/config_slave2.yaml | 37 +++++++++++++ .../Setting2_2unsym/my_filter.py | 53 +++++++++++++++++++ .../Setting2_2unsym/readme.txt | 14 +++++ .../Setting3_112/config_slave.yaml | 7 +++ .../Setting3_112/config_slave2.yaml | 7 +++ .../Setting3_112/config_slave3.yaml | 7 +++ .../Setting3_222/config_slave.yaml | 7 +++ .../Setting3_222/config_slave2.yaml | 7 +++ .../Setting3_222/config_slave3.yaml | 7 +++ 13 files changed, 225 insertions(+) create mode 100755 openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/common.py create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml create mode 100755 openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/my_filter.py create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/readme.txt diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml index 190e5c1..0305fef 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml @@ -4,6 +4,13 @@ config: name: 'HC node' info: 'filter runs on local node' iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: 
"tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml index 4d226be..fff7275 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml @@ -4,6 +4,13 @@ config: name: 'HC node' info: 'filter runs on local node' iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/common.py b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/common.py new file mode 100755 index 0000000..370d25f --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/common.py @@ -0,0 +1,28 @@ +from uniflex.core import events + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class AveragedSpectrumScanSampleEvent(events.EventBase): + def __init__(self, avg): + super().__init__() + self.avg = avg + + +class StartMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class StopMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class ChangeWindowSizeEvent(events.EventBase): + def __init__(self, value): + super().__init__() + self.window = value diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml new file mode 100644 index 0000000..0f3b703 --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml @@ -0,0 +1,37 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" 
+ pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02"], + 'neighbors' : ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], + 'myMAC' : 'aa:aa:aa:aa:aa:01', + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault': 54000000, + 'channelThroughput':[54000000, 54000000, 54000000, 54000000, 26000000], + 'txBytesRandom':0}} + diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml new file mode 100644 index 0000000..bdd9d3d --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml @@ -0,0 +1,37 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:cc:11"], + 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], + 'myMAC' : 'aa:aa:aa:aa:aa:02', + ''simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault': 54000000, + 'channelThroughput':[54000000, 54000000, 
54000000, 54000000, 26000000], + 'txBytesRandom':0}} + diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/my_filter.py b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/my_filter.py new file mode 100755 index 0000000..59d59a2 --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/my_filter.py @@ -0,0 +1,53 @@ +import logging +from uniflex.core import modules +from sbi.radio_device.events import SpectralScanSampleEvent +from common import AveragedSpectrumScanSampleEvent +from common import ChangeWindowSizeEvent + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class MyAvgFilter(modules.ControlApplication): + def __init__(self, window): + super(MyAvgFilter, self).__init__() + self.log = logging.getLogger('MyFilter') + self.window = window + self.samples = [] + + @modules.on_event(ChangeWindowSizeEvent) + def change_window_size(self, event): + self.log.info("New window size: {}".format(event.window)) + self.window = event.window + + def change_window_size_func(self, newWindow): + self.log.info("New window size: {}".format(newWindow)) + self.window = newWindow + + def get_window_size(self): + return self.window + + @modules.on_event(SpectralScanSampleEvent) + def serve_spectral_scan_sample(self, event): + sample = event.sample + node = event.node + device = event.device + self.log.debug("New SpectralScan Sample:{} from node {}, device: {}" + .format(sample, node, device)) + + self.samples.append(sample) + + if len(self.samples) == self.window: + s = sum(self.samples) + self.samples.pop(0) + avg = s / self.window + self.log.debug("Calculated average: {}".format(avg)) + event = AveragedSpectrumScanSampleEvent(avg) + self.send_event(event) + + def add_two(self, value): + value1 = value + 2 + value2 = value * 2 + return [value1, value2] diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/readme.txt 
b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/readme.txt new file mode 100644 index 0000000..a99904a --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/readme.txt @@ -0,0 +1,14 @@ +# 1. Run control program and all modules on local node +uniflex-agent --config ./config_local.yaml + +source ~/Uniflex/dev/bin/activate + +# 2a. Run control program in master node: +uniflex-broker +# 2b. Run control program in master node: +python3 rrm_agent.py --config ./config_master.yaml +# 2c. Run modules in slave node: +uniflex-agent --config ./config_slave.yaml +uniflex-agent --config ./config_slave2.yaml + +# For debugging mode run with -v option diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml index e23a542..8e078b1 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml @@ -4,6 +4,13 @@ config: name: 'HC node' info: 'filter runs on local node' iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml index 2ba919f..d0c06a2 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml @@ -4,6 +4,13 @@ config: name: 'HC node' info: 'filter runs on local node' iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml index 
5ddf209..db179bd 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml @@ -4,6 +4,13 @@ config: name: 'HC node' info: 'filter runs on local node' iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml index 8827d13..ef3246f 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml @@ -4,6 +4,13 @@ config: name: 'HC node' info: 'filter runs on local node' iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml index 9073205..d0d4fa7 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml @@ -4,6 +4,13 @@ config: name: 'HC node' info: 'filter runs on local node' iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml index 43b0cce..88e3872 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml @@ -4,6 +4,13 @@ config: name: 'HC node' info: 'filter runs on local node' 
iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: From b771e121b1a585d2a514555ccac629baa5d285e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Sun, 12 May 2019 21:10:51 +0200 Subject: [PATCH 37/54] comment controller --- openAI_RRM/channel_controller.py | 345 ++++------ openAI_RRM/data_20190419/readme | 37 - openAI_RRM/data_20190419/rrmagent_22_1.csv | 338 --------- openAI_RRM/data_20190419/rrmagent_3112.csv | 641 ----------------- openAI_RRM/data_20190419/rrmagent_3112_3.csv | 649 ------------------ openAI_RRM/data_20190419/rrmagent_3222_1.csv | 410 ----------- openAI_RRM/data_20190419/rrmagent_3222_2.csv | 361 ---------- openAI_RRM/data_20190419/thomagent_3112.csv | 630 ----------------- .../data_20190419/thomsonagent_22_1.csv | 357 ---------- .../data_20190419/thomsonagent_3222.csv | 376 ---------- 10 files changed, 115 insertions(+), 4029 deletions(-) delete mode 100644 openAI_RRM/data_20190419/readme delete mode 100644 openAI_RRM/data_20190419/rrmagent_22_1.csv delete mode 100644 openAI_RRM/data_20190419/rrmagent_3112.csv delete mode 100644 openAI_RRM/data_20190419/rrmagent_3112_3.csv delete mode 100644 openAI_RRM/data_20190419/rrmagent_3222_1.csv delete mode 100644 openAI_RRM/data_20190419/rrmagent_3222_2.csv delete mode 100644 openAI_RRM/data_20190419/thomagent_3112.csv delete mode 100644 openAI_RRM/data_20190419/thomsonagent_22_1.csv delete mode 100644 openAI_RRM/data_20190419/thomsonagent_3222.csv diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index 7d2b2d7..16866e1 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -36,8 +36,6 @@ def __init__(self,**kwargs): self.running = False self.timeInterval = 10 -# self.timer = TimerEventSender(self, PeriodicEvaluationTimeEvent) -# self.timer.start(self.timeInterval) 
self.packetLossEventsEnabled = False self.channel = 1 @@ -53,14 +51,6 @@ def __init__(self,**kwargs): if 'simulation' in kwargs: self.simulation = kwargs['simulation'] - -# if not "openAI_controller" in kwargs: -# raise ValueError("There is no OpenAI gym controller specified. Can not #find \"" + "openAI_controller" + "\" as kwargs in the config file.") -# else: -# __import__(kwargs["openAI_controller"], globals(), locals(), [], 0) -# splits = kwargs["openAI_controller"].split('.') -# class_name = splits[-1] -# self.openAI_controller = class_name(self, kwargs) @modules.on_start() def my_start_function(self): @@ -167,8 +157,19 @@ def scheduled_get_channel_cb(self, data): print("Scheduled get_channel; Power in " "Node: {}, Dev: {}, was set to: {}" .format(node.hostname, dev.name, msg)) - + + ''' + Channel mapping controller + ''' def set_channel(self, node_uuid, dev_uuid, ifaceName, channel_number, channel_width): + ''' + Set one channel to one AP + :param node_uuid: UUID of AP node + :param dev_uuid: UUID of AP device + :param ifaceName: Name of AP interface + :param channel_number: Number of new channel + :param channel_width: Bandwidth of new channel + ''' device = self._get_device_by_uuids(node_uuid, dev_uuid) if device is None: return False @@ -178,27 +179,27 @@ def set_channel(self, node_uuid, dev_uuid, ifaceName, channel_number, channel_wi device.blocking(False).set_channel(channel_number, ifaceName, control_socket_path='/var/run/hostapd') return True - ''' - Returns a list of the bandwidth of all transmitted data from one - controlled device to a client. The data is structured as follows: - { - 'MAC_of_client1' : { - 'mac' : 'MAC_of_client1', - 'bandwidth': bandwidth to the client, - 'node': { - 'hostname': 'hostname of my AP node', - 'uuid': 'uuid of my AP node' - }, - 'device': { - 'name': 'device name of the AP's physical interface', - 'uuid': 'uuid of the device', - }, - 'interface': 'name of the interface' - } - } - Notice: new devices have bandwidth 0! 
- ''' def get_bandwidth(self): + ''' + Returns a list of the bandwidth of all transmitted data from one + controlled device to a client. The data is structured as follows: + { + 'MAC_of_client1' : { + 'mac' : 'MAC_of_client1', + 'bandwidth': bandwidth to the client, + 'node': { + 'hostname': 'hostname of my AP node', + 'uuid': 'uuid of my AP node' + }, + 'device': { + 'name': 'device name of the AP's physical interface', + 'uuid': 'uuid of the device', + }, + 'interface': 'name of the interface' + } + } + Notice: new devices have bandwidth 0! + ''' bandwidth = {} for node in self.get_nodes(): for device in node.get_devices(): @@ -241,26 +242,26 @@ def get_bandwidth(self): device.my_control_flow.remove(flow) return bandwidth - ''' - Returns a data structure of all available interfaces in the system - It is structured as follows: - { - 'uuid_of_node_1': { - 'hostname' : 'hostname of node1', - 'uuid' : 'uuid of node1', - 'devices' : { - 'name' : 'name of device1', - 'uuid' : 'uuid of device1', - 'interfaces' : [ - 'name of iface1', 'name of iface2' - ] - }, - ... - }, - ... - } - ''' def get_interfaces(self): + ''' + Returns a data structure of all available interfaces in the system + It is structured as follows: + { + 'uuid_of_node_1': { + 'hostname' : 'hostname of node1', + 'uuid' : 'uuid of node1', + 'devices' : { + 'name' : 'name of device1', + 'uuid' : 'uuid of device1', + 'interfaces' : [ + 'name of iface1', 'name of iface2' + ] + }, + ... + }, + ... 
+ } + ''' interfaces = {} for node in self.get_nodes(): nodeinfo = {'hostname': node.hostname, 'uuid': node.uuid} @@ -276,23 +277,23 @@ def get_interfaces(self): interfaces[node.uuid] = nodeinfo return interfaces - ''' - Collects and returns a list of the channel to interface mapping - [ - {'channel number' : 'number of the channel', - 'channel width' : 'width of the channel', - 'node': { - 'hostname': 'hostname of my AP node', - 'uuid': 'uuid of my AP node' - }, - 'device': { - 'name': 'device name of the AP's physical interface', - 'uuid': 'uuid of the device', - }, - 'interface': 'name of the interface' - ] - ''' def get_channels(self): + ''' + Collects and returns a list of the channel to interface mapping + [ + {'channel number' : 'number of the channel', + 'channel width' : 'width of the channel', + 'node': { + 'hostname': 'hostname of my AP node', + 'uuid': 'uuid of my AP node' + }, + 'device': { + 'name': 'device name of the AP's physical interface', + 'uuid': 'uuid of the device', + }, + 'interface': 'name of the interface' + ] + ''' channel_mapping = [] for node in self.get_nodes(): for device in node.get_devices(): @@ -309,7 +310,13 @@ def get_channels(self): return channel_mapping def simulate_flows(self): + ''' + Simulate packet counters on simulated APs + ''' + flows = [] + + #collect state(channels and bandwidth) of all devices for node in self.get_nodes(): for device in node.get_devices(): for interface in device.get_interfaces(): @@ -319,7 +326,8 @@ def simulate_flows(self): mac = device.get_address() flows.append({'mac address' : mac, 'channel number' : chnum, 'channel width' : chw, 'iface': interface}) - + + # simulate packet counter on AP modules for node in self.get_nodes(): for device in node.get_devices(): for interface in device.get_interfaces(): @@ -338,26 +346,8 @@ def periodic_evaluation(self, event): self.reset() self.execute_action([1]) print(self.get_observation()) - ''' - flows = [] - - ifaces = self.get_interfaces() - node_uuid = 
list(ifaces.keys())[0] - dev_uuid = list(ifaces[node_uuid]['devices'].keys())[0] - ifaceName = ifaces[node_uuid]['devices'][dev_uuid]['interfaces'][0] - - print(self.get_channels()) - self.simulate_flows() - print(self.get_bandwidth()) - - for node in self.get_nodes(): - for device in node.get_devices(): - for interface in device.get_interfaces(): - self.set_channel(node.uuid, device.uuid, interface, self.channel, None) - self.channel += 1 - if self.channel > 13: - self.channel = 1 - ''' + + ''' OpenAI Gym Uniflex env API @@ -370,9 +360,6 @@ def reset(self): self.actionSpace = self.get_actionSpace() self.actionSet = [] - #for index in range(actionSpace): - - interfaces = self.get_interfaces() # set a start channel for each interface: @@ -392,30 +379,37 @@ def reset(self): return def execute_action(self, action): + ''' + Map scalar action to channel vector + channel value = (action/numberOfChannels^AP_id) mod numberOfChannels + ''' for index, interface in enumerate(self._create_interface_list()): ifaceaction = int(action / (pow(len(self.availableChannels),index))) ifaceaction = ifaceaction % len(self.availableChannels) self.set_channel(interface['node'], interface['device'], interface['iface'], self.availableChannels[ifaceaction], None) - #try: - # for index, actionStep in enumerate(action): - # interface = self.actionSpace[index] - # self.set_channel(interface['node'], interface['device'], interface['iface'], actionStep*4+1, None) - #except TypeError: - # interface = self.actionSpace[0] - # self.set_channel(interface['node'], interface['device'], interface['iface'], action*4+1, None) return def render(): return def get_observationSpace(self): + ''' + Returns observation space for open AI gym + result is a MultiDiscrete vector space + each component has the number of available channels. 
Is the same value for all entries + ''' maxValues = [len(self.availableChannels) for i in self._create_interface_list()] #return spaces.Box(low=0, high=numChannels, shape=(len(self._create_interface_list()),0), dtype=numpy.float32) return spaces.MultiDiscrete(maxValues) #spaces.Box(low=0, high=10000000, shape=(len(self.observationSpace),), dtype=numpy.float32) def get_actionSpace(self): + ''' + Returns action space for open AI gym + result is a Discrete scalar space + dimension is NumberOfChannels^NumberOfAPs + ''' interfaceList = self._create_interface_list(); if(len(interfaceList) > 0): self.log.info("UUIDs of the action space") @@ -426,18 +420,29 @@ def get_actionSpace(self): return spaces.Discrete(pow(len(self.availableChannels), len(interfaceList))) def get_observation(self): + ''' + Returns vector with state (channel) of each AP + ''' channels = self.get_channels() observation = list(map(lambda x: x['channel number'], channels)) return observation # game over if there is a new interface def get_gameOver(self): + ''' + Test if topology changes + Bases on information, which client is registered at which AP + ''' clients = self._create_client_list() clientHash = [i['mac'] + i['node'] + i['device'] + i['iface'] for i in clients] observationSpaceHash = [i['mac'] + i['node'] + i['device'] + i['iface'] for i in self.registeredClients] return not len(set(clientHash).symmetric_difference(set(observationSpaceHash))) == 0 def get_reward(self): + ''' + Calculate reward for the current state + reward = sum (sqrt(throughput of client)) + ''' # for simulation if(self.simulation): self.simulate_flows() @@ -457,12 +462,21 @@ def get_reward(self): def _get_bandwidth_by_client(self, bandwidthList, clientData): + ''' + extracts bandwidth of client from bandwidth list + :param bandwidthList: List of all clients, the AP they are associated with and their bandwidth + :param clientData: data of the client. 
+ ''' for mac, client in bandwidthList.items(): if (mac == clientData['mac']) and (client['node']['uuid'] == clientData['node']) and (client['device']['uuid'] == clientData['device']) and (client['interface'] == clientData['iface']): return client['bandwidth'] return None def _create_client_list(self): + ''' + create linear client list + result is list of dictionarys with attribute: mac, node, device, iface + ''' clientList = [] clients = self.get_bandwidth() for mac, client in clients.items(): @@ -472,6 +486,10 @@ def _create_client_list(self): return clientList def _create_interface_list(self): + ''' + create linear ap list + result is list of dictionarys with attribute: node, device, iface + ''' interfaceList = [] interfaces = self.get_interfaces() for nodeUuid, node in interfaces.items(): @@ -479,136 +497,3 @@ def _create_interface_list(self): for iface in device['interfaces']: interfaceList.append({'node': node['uuid'], 'device': device['uuid'], 'iface': iface}) return interfaceList - - - - - ''' - print(self.get_bandwidth()) - - print(self.get_nodes()) - for node in self.get_nodes(): - print(node.get_devices()) - for device in node.get_devices(): - device.spectral_scan_stop() - chnum = device.get_channel("wlan0") - chw = device.get_channel_width("wlan0") - infos = device.get_info_of_connected_devices("wlan0") - - for mac in infos: - flows.append({'mac address' : mac, 'channel number' : chnum, 'channel width' : chw}) - - for node in self.get_nodes(): - print ("work " + node.hostname) - for device in node.get_devices(): - - if type(device.my_control_flow) is not list: - device.my_control_flow = [] - - for flow in device.my_control_flow: - flow['old'] = True - - device.set_packet_counter(flows, "wlan0") - chnum = device.get_channel("wlan0") - chw = device.get_channel_width("wlan0") - infos = device.get_info_of_connected_devices("wlan0") - - bandwidth = {} - - for mac in infos: - values = infos[mac] - newTxBytes = int(values['tx bytes'][0]) - - flow = [d for d in 
device.my_control_flow if d['mac address'] == mac] - if len(flow) > 0: - flow = flow[0] - dif = datetime.datetime.now() - flow['last update'] - bandwidth[mac] = (newTxBytes - flow['tx bytes'] ) / (dif.total_seconds() + dif.microseconds / 1000000.0) - flow['tx bytes'] = newTxBytes - flow['last update'] = datetime.datetime.now() - flow['old'] = False - else : - device.my_control_flow.append({'mac address' : mac, 'tx bytes' : newTxBytes, 'last update' : datetime.datetime.now(), 'old' : False}) - - for flow in device.my_control_flow: - if flow['old']: - device.my_control_flow.remove(flow) - - print ("device " + device.name + " operates on channel " + str(chnum) + " with a bandwidth of " + chw + " - change to channel " + str(self.channel)) - print(bandwidth) - - device.blocking(False).set_channel(self.channel, "wlan0") - - self.channel += 1 - if self.channel > 13: - self.channel = 1 - ''' - ''' - node = self.get_node(0) - device = node.get_device(0) - - if device.is_packet_loss_monitor_running(): - device.packet_loss_monitor_stop() - device.spectral_scan_stop() - else: - device.packet_loss_monitor_start() - device.spectral_scan_start() - - avgFilterApp = None - for app in node.get_control_applications(): - if app.name == "MyAvgFilter": - avgFilterApp = app - break - - if avgFilterApp.is_running(): - myValue = random.randint(1, 20) - [nValue1, nValue2] = avgFilterApp.blocking(True).add_two(myValue) - print("My value: {} + 2 = {}".format(myValue, nValue1)) - print("My value: {} * 2 = {}".format(myValue, nValue2)) - avgFilterApp.stop() - - newWindow = random.randint(10, 50) - old = avgFilterApp.blocking(True).get_window_size() - print("Old Window Size : {}".format(old)) - avgFilterApp.blocking(True).change_window_size_func(newWindow) - nValue = avgFilterApp.blocking(True).get_window_size() - print("New Window Size : {}".format(nValue)) - - else: - avgFilterApp.start() - newWindow = random.randint(10, 50) - event = ChangeWindowSizeEvent(newWindow) - 
avgFilterApp.send_event(event) - - # execute non-blocking function immediately - device.blocking(False).set_tx_power(random.randint(1, 20), "wlan0") - - # execute non-blocking function immediately, with specific callback - device.callback(self.get_power_cb).get_tx_power("wlan0") - - # schedule non-blocking function delay - device.delay(3).callback(self.default_cb).get_tx_power("wlan0") - - # schedule non-blocking function exec time - exec_time = datetime.datetime.now() + datetime.timedelta(seconds=3) - newChannel = random.randint(1, 11) - device.exec_time(exec_time).set_channel(newChannel, "wlan0") - - # schedule execution of function multiple times - start_date = datetime.datetime.now() + datetime.timedelta(seconds=2) - interval = datetime.timedelta(seconds=1) - repetitionNum = 3 - device.exec_time(start_date, interval, repetitionNum).callback(self.scheduled_get_channel_cb).get_channel("wlan0") - - # execute blocking function immediately - result = device.get_channel("wlan0") - print("{} Channel is: {}".format(datetime.datetime.now(), result)) - - # exception handling, clean_per_flow_tx_power_table implementation - # raises exception - try: - device.clean_per_flow_tx_power_table("wlan0") - except Exception as e: - print("{} !!!Exception!!!: {}".format( - datetime.datetime.now(), e)) - ''' diff --git a/openAI_RRM/data_20190419/readme b/openAI_RRM/data_20190419/readme deleted file mode 100644 index 289a7a4..0000000 --- a/openAI_RRM/data_20190419/readme +++ /dev/null @@ -1,37 +0,0 @@ -Experiment 1.Stelle 2.Stelle 3.Stelle Erfolg -rrm_22_1 AP2 AP1 Ja -thom_22_1 AP2 AP1 Ja -rrm_3222 AP3 AP1 AP2 -rrm_3222_2 AP3 AP1 AP2 Ja -thom_3222 AP3 AP2 AP1 Ja -rrm_3112 AP3 AP2 AP1 Nein -rrm_3112_2 AP1 AP2 AP3 Nein -thom_3112 AP3 AP2 AP1 Ja -rrm_3112_3 AP2 AP3 AP1 Ja - -Setup 22 -AP1 = 8052dedf-e186-46fa-81c3-6ef20e3f022c -AP2 = 5c1c1270-e2bf-4fd1-a2e3-3080c41dd700 - -Setup 3_222 1. 
Experiment -AP1 = 89c12af0-0eaa-4b35-95ff-f351d5b576e7 -AP2 = 604c186b-4e00-4e69-a2db-6d23447e9d7c -AP3 = 4268dc71-710d-455f-ac8f-17a827fc9f96 - -Setup 3_222 2. Experiment -AP1 = 8a26f145-a736-4784-b311-e09c7438bed2 -AP2 = 52433dc2-b784-45fa-b2a7-2418e9d4d7c0 -AP3 = 92a6d556-bf0b-4bfb-b0a5-6c2fdf758931 - -Setup 3_112 1. Experiment -AP1 = 0bccab73-2c39-407a-b4e7-ee05e772cfec -AP2 = 7529367c-2fa5-4932-a7d0-666735d43d2d -AP3 = 6f47dce9-11a4-4b2c-8b92-12dbf104ac33 - -Setup 3_112 2. Experiment -AP1 = f3667375-4fa6-408d-b56a-783478c92dda -AP2 = 26515431-cc58-45c1-810f-39ac766559f3 -AP3 = a6b812a1-f199-4b2f-a2c5-3e1a1831095c - -Kanäle 1,5 - diff --git a/openAI_RRM/data_20190419/rrmagent_22_1.csv b/openAI_RRM/data_20190419/rrmagent_22_1.csv deleted file mode 100644 index a9c1e6a..0000000 --- a/openAI_RRM/data_20190419/rrmagent_22_1.csv +++ /dev/null @@ -1,338 +0,0 @@ -6.968643339234087,2 -4.877340252097936,3 -6.894486594503971,2 -4.829809671219739,0 -5.025592454625421,0 -6.80486489797023,1 -6.500577186643617,2 -4.849531907866088,3 -5.0654168120928365,3 -4.928014473826464,3 -4.884206440927315,3 -6.823913201397861,2 -4.827791397754525,3 -4.710968682020747,0 -4.725494201479047,3 -4.6772298648356445,0 -4.971634498050561,0 -6.734628669641348,1 -6.79381191195557,2 -4.916999928356259,3 -6.777327164411419,2 -4.785054535634852,3 -4.668890283605787,0 -6.659677808303656,2 -6.573804989583071,1 -4.840337352734287,3 -6.674679458111923,1 -6.546318109013128,2 -7.0548103277975525,2 -6.648963612882398,1 -4.939050565760155,3 -6.746748765316303,2 -7.126035060886703,2 -4.891647646109325,0 -6.9516003435488125,2 -6.666285651572075,1 -6.632430634910994,2 -6.889853767492763,2 -7.085690357700651,2 -4.699101634384928,3 -6.637868493469754,2 -7.194799179402078,2 -4.729267738351576,0 -4.646291652109486,3 -7.046479016628102,2 -6.382961019959405,1 -6.623067984282345,2 -6.475330639586478,1 -4.847108723132625,0 -6.757747384927922,1 -6.612215024235049,2 -4.887159500981526,0 -6.7681333364099086,1 
-6.392783034634185,2 -4.8235575376641915,0 -6.635191843943176,2 -7.061104250860142,2 -4.825520768653243,0 -6.658124978393555,2 -6.873119464410044,2 -4.829193485615847,0 -6.816956393488488,2 -7.348078859141863,2 -7.190509995978846,2 -6.598472548056792,1 -6.618930307898433,2 -6.555433151026691,1 -6.996693716625735,1 -4.74573244351075,0 -6.969089045677698,2 -6.913747833532728,2 -6.935699065471702,2 -4.886310241395686,3 -5.063963543365806,3 -6.865671251885623,1 -4.851177558792399,3 -6.772657103600182,2 -6.977968406655536,2 -7.080783280679432,2 -7.174101049587835,2 -4.920430285351007,3 -6.878660281342454,2 -4.9223665650227355,0 -6.706509956584688,2 -6.489952360068629,1 -6.414588460610277,2 -7.118026227021735,2 -6.463803113236342,1 -6.6291784186262035,2 -6.899788881249748,2 -7.2864317905635705,2 -4.857210603224528,0 -6.772191109731639,1 -7.138453523981743,1 -6.536643947786893,2 -4.91954758732016,0 -7.048022771804469,2 -7.1481066539469165,2 -4.853762378713745,3 -6.867822483406982,2 -4.854438805676736,0 -4.6990289552568685,3 -6.942845794675865,2 -4.814115266294784,3 -4.648821123685326,0 -6.7816863689134514,2 -7.1827109040931205,2 -6.998684379219938,2 -4.823120195312265,3 -6.870245593470276,2 -7.005422003770458,2 -7.10160366288154,2 -4.743335626562599,3 -6.890518645053367,1 -4.910143805147186,0 -6.737654296082274,2 -6.831435331211273,2 -4.773715740657117,0 -5.030927248125971,0 -6.830290986841219,2 -7.132640642938056,2 -7.012116058149708,2 -7.034015889432277,2 -4.875144612358995,0 -4.989051541358437,0 -6.760353113642177,2 -6.5487112243726076,1 -6.520886288454751,2 -7.070408028675438,2 -6.606899313434736,1 -6.402962347926719,2 -4.702202211294271,0 -6.891731604462052,2 -4.752448447919154,0 -6.864958652102934,2 -6.855078935651558,2 -4.905512048107439,3 -6.783473624652729,2 -7.201016187093753,2 -7.051253054127781,2 -7.0680783915912455,2 -7.004368286139357,2 -7.141818835733709,2 -4.897877672956865,3 -6.661111733005551,2 -4.9928829901804805,0 -6.777076182365991,2 
-6.452636089576176,1 -6.625842294192408,2 -7.001313472954859,2 -6.6631872624987185,1 -6.654628248657436,2 -7.009231604900202,2 -7.049048846494447,2 -4.966995864947118,0 -6.936887654507816,1 -6.592492443618869,2 -7.179452609410091,2 -7.190989514642377,2 -7.082841905052061,2 -7.191275220247753,2 -4.817912399655011,3 -6.83713312008293,2 -4.722326983159793,0 -6.710742039869401,2 -4.743531144429287,0 -6.6378328722729885,2 -6.583251087299078,1 -6.7140441240083755,2 -7.109080841828271,2 -7.258536038831862,2 -4.913052942189178,3 -7.01482149395946,2 -6.919169689189318,2 -7.110053530122957,2 -6.981844464530859,2 -7.059908941702666,2 -4.789776862904222,0 -6.680802123693604,2 -7.051842600816948,2 -4.936685534905964,3 -4.685305792112524,0 -6.915744774907733,2 -7.214228174234699,2 -7.060875154171194,2 -4.806483676969002,0 -4.9715286396636245,0 -6.860747440488777,2 -7.138472303395467,2 -4.835968790285445,0 -6.685903933027552,2 -7.1074048304785595,2 -7.056817810488039,2 -6.420762907490318,1 -7.009962624457808,1 -6.628189707434896,2 -4.830593329626463,3 -4.954873836652197,3 -6.65825643070384,2 -4.792722050542475,0 -6.793214190594365,2 -7.039737791980063,2 -4.744820571819737,0 -4.8205618610947605,3 -4.684542979132895,0 -6.867747233324013,2 -6.878029638148211,2 -6.563539819209222,1 -7.212530057749915,1 -6.77509775604327,2 -7.30031520142915,2 -6.848728274656735,2 -7.056571973972035,2 -6.445156583932425,1 -6.648560685289599,2 -7.0115084102559795,2 -7.185117669485082,2 -6.741130317286606,1 -6.501099436849239,2 -5.030723615305958,0 -5.093808830092948,0 -6.72786551418672,2 -7.016205597664731,2 -4.860231948679554,3 -6.7164589995306185,2 -7.082702091898946,2 -7.162307678918578,2 -7.00180080384133,2 -4.702571251143576,3 -6.783500950309879,2 -7.0984519319460295,2 -6.948196638438487,2 -7.177266530985688,2 -7.10487170553006,2 -6.896338684579194,2 -6.964668263600719,2 -4.838488965787582,3 -6.931169502477494,2 -7.012010591875666,2 -6.9305120390077395,2 -7.01886499209714,2 -6.686300358378705,1 
-6.478197250800746,2 -7.108981985394132,2 -4.8704101271040985,3 -6.8589890963990765,2 -4.786408524653583,0 -5.10362913849168,0 -6.89184767895632,2 -7.040603144969317,2 -4.807630653126563,3 -4.6238175122393885,0 -4.574610403279715,3 -6.838592440835193,2 -7.188905110462382,2 -7.092075428290929,2 -6.965890611010906,2 -6.8410176009886765,2 -6.879959611672154,2 -7.137476307195644,2 -7.191988430997359,2 -6.907463492826523,2 -6.932056661460609,2 -6.942916038460171,2 -6.877741995221439,2 -7.045372205692875,2 -7.0787385444704505,2 -6.756599961962838,2 -7.176309154919773,2 -7.15220093554735,2 -7.235784070661875,2 -6.8821976020033135,2 -7.213369089809258,2 -7.206141927882715,2 -6.889058360718991,2 -7.090830174847597,2 -7.163511595022865,2 -6.978910620547534,2 -6.991615356945119,2 -7.161782227243371,2 -7.155438984308994,2 -6.984049761326135,2 -4.745353438497089,3 -6.793316714332721,2 -7.131776451117646,2 -6.562429813286526,1 -6.438395104821105,2 -7.006833196411003,2 -6.904562201316457,2 -6.917422612028198,2 -7.289732508558295,2 -7.071289841728291,2 -6.9969520532745175,2 -7.037187193073474,2 -4.772903220397086,0 -6.70843209380403,2 -6.899288651070654,2 -7.194909057312767,2 -6.763903960744844,2 -7.16577586627564,2 -7.224529931166564,2 -6.915155959260374,2 -6.645465888444863,1 -6.452513258409276,2 -6.799732876507319,2 -6.860994050302957,2 -7.159079252396944,2 -7.09834688734944,2 -6.968707828566198,2 -7.076907124357614,2 -6.89088874293343,2 -7.030734053034645,2 -6.977380208522623,2 -7.178285056710084,2 -7.048217163193621,2 -7.220657465641554,2 -7.220435049436793,2 -7.133472358246626,2 -7.2212033767085035,2 -7.197598410186831,2 -7.041386395567242,2 -7.049590436975319,2 -7.074619620358661,2 -7.109738928371129,2 -7.142986620071055,2 -7.045726014879667,2 -7.092335092969319,2 -7.038349511691561,2 -7.168301496672241,2 -6.90746412484504,2 -6.946230283683841,2 -7.069570089106983,2 -7.150555964667147,2 -6.9213233969319505,2 -6.872091106506848,2 -7.065164546813956,2 -7.151857625837071,2 
-6.854066779931638,2 diff --git a/openAI_RRM/data_20190419/rrmagent_3112.csv b/openAI_RRM/data_20190419/rrmagent_3112.csv deleted file mode 100644 index 408f996..0000000 --- a/openAI_RRM/data_20190419/rrmagent_3112.csv +++ /dev/null @@ -1,641 +0,0 @@ -4.636890087010754,7 -6.7799893316953845,6 -6.459088955705708,4 -6.737466573405607,4 -4.622919332688427,7 -6.454850869677397,5 -6.3757979342637325,3 -4.6067086084799325,0 -4.916798604753089,0 -6.994094441982819,6 -6.636033116666916,3 -6.676290717409006,3 -6.509528433300335,5 -6.764079602315745,1 -6.6329234575558695,2 -6.744756201716382,6 -4.781002888853598,0 -6.615589297246637,3 -6.874998734332087,3 -6.613308947332084,5 -6.358291524087188,4 -6.6388000788720785,4 -6.4916776488646395,2 -4.902356258451068,0 -6.735080351388777,6 -6.7731126945291455,2 -6.400480311260973,5 -4.732138080266287,0 -5.049165950277201,0 -4.6647733435089584,7 -4.624783412830145,0 -6.5024580372600465,2 -4.627272198674834,7 -6.645363868686492,3 -6.751042323856252,6 -6.51111985345798,5 -6.333852721628196,2 -4.609642123320378,7 -6.594991773471231,3 -6.878649504019797,3 -6.806712243280641,6 -7.156634187757507,6 -6.701943608139835,1 -4.707173388556988,7 -5.0451397351512774,7 -6.4818417629515555,4 -6.786135751230505,4 -7.050229242196158,6 -6.555112142283533,2 -6.928178754008459,2 -4.806001451218146,7 -6.514877732247445,4 -6.452351976564043,2 -4.778554896985806,7 -6.51798685812305,4 -6.526290220484503,5 -6.913833832352378,4 -6.879516280967922,4 -6.930145339454475,4 -4.877088013042241,0 -4.598945061631151,7 -6.342474985037088,4 -6.904648665309053,4 -6.293000318118076,3 -6.63975482026757,6 -6.527955387644473,3 -6.816215075873159,3 -6.713273857431792,2 -6.658073381213166,3 -6.81064409164542,6 -6.921344886097386,2 -6.397977942125616,3 -6.831977048727669,6 -6.553330568820461,1 -6.562913150569115,6 -6.708146790865019,2 -4.744719445987243,7 -6.867621605351927,6 -4.712822617001235,7 -6.596809414930148,3 -6.863458753571094,6 -7.205518211587517,6 
-4.753668627081319,0 -6.612750526405767,2 -4.743253140430196,7 -6.452242752442089,4 -6.759645934800799,6 -4.7179688779429245,0 -4.895529721760933,0 -7.093671617768086,6 -6.790074222447813,4 -6.722563703473493,4 -6.5056469097177745,2 -6.717148078769022,1 -6.610660512665452,6 -6.51441649720159,4 -6.665153358117918,4 -6.969866331321373,1 -6.715169202053958,3 -4.634713700159363,0 -6.679126768291417,6 -6.511634505827729,1 -6.3366121708030025,4 -6.58380917629667,2 -4.7082623506159935,7 -6.908149571503203,6 -6.4955561910074735,5 -6.647220125538881,4 -6.914165805780529,4 -4.609283622207793,7 -6.567466303083183,2 -4.678539862768614,7 -6.604524545811169,5 -4.745245800139197,0 -6.689496432230957,6 -4.79834785016499,0 -6.574621106349144,2 -4.81625947162899,7 -7.004680615330818,6 -4.705007363600694,0 -6.371193666089834,2 -4.6229095218206115,7 -7.072257941642865,1 -6.751828350459519,5 -6.359758823795674,4 -6.780224713436874,4 -6.850018474086452,4 -6.743113018080438,1 -6.659362308662079,6 -6.746162151506149,4 -4.778188461536007,7 -6.482950317176041,2 -6.572182901017555,4 -6.977153035571581,4 -6.697080124595828,4 -6.573513082423481,1 -6.618945070824196,4 -6.829135520667372,4 -6.8060987556413695,4 -6.849524996477258,4 -4.767324608323771,7 -6.854707109417576,6 -7.233782869312423,4 -6.811932999502663,1 -6.3838253124720215,4 -6.744744457942768,4 -6.707861698970235,4 -6.948514652820179,6 -4.793535420438161,7 -6.795015799462305,6 -6.632809618564978,4 -6.598404135312052,1 -6.660777562252721,6 -6.508885275798235,4 -6.803155701199981,4 -6.60168826270381,2 -4.650293287390867,7 -6.763043351942853,3 -6.748515493435586,6 -6.584840840787549,4 -6.330616256304962,3 -6.713333297736223,6 -6.71213430490075,4 -6.983765131066388,4 -6.762085297544339,4 -6.623474823712336,4 -4.860505260286573,0 -6.824357209036116,6 -6.502199595488896,4 -6.618085024671698,5 -6.67162131615852,4 -6.867489949501145,4 -6.918745596757944,4 -6.8263717349551065,4 -6.743118366792812,4 -6.54738527629201,4 -6.678662336966622,4 
-6.69106384053542,5 -6.5795483565914035,4 -4.672893731130056,0 -6.8442625830930925,6 -6.505836658516375,4 -6.782959708282549,4 -4.906104026610493,0 -6.859230706366937,6 -6.772420554854872,4 -6.824617701942945,4 -6.745118310770669,4 -6.481227953485937,5 -6.822615382118041,4 -6.803450978581003,4 -6.843423589265219,6 -6.803796152413582,4 -6.315683550668924,3 -6.600180423481633,6 -6.7229388606649785,4 -6.856667450295595,4 -6.644828217246306,4 -4.625306499499098,7 -6.813687915496606,1 -5.002657444466438,0 -6.531789074920601,3 -6.686279646799915,6 -6.473282789512179,5 -6.770033862033054,6 -6.68321348039515,4 -6.948683028445439,4 -6.739134063818074,4 -6.760680799488955,4 -6.788198611894324,1 -6.605782836861434,4 -7.083871922749483,4 -6.71694987431531,4 -6.678477273251375,4 -6.12752277590832,3 -6.777523263542053,6 -6.588030901946514,5 -4.669723302865819,0 -6.477444485301134,5 -6.717531682293459,4 -6.726620170046388,4 -4.874530008658597,0 -6.725975907931507,4 -6.814856769645765,4 -6.774831938440361,4 -7.047879907427848,4 -6.817668129770284,4 -6.841854300440983,4 -6.954224132176878,4 -6.603980314282387,4 -6.629171307018236,2 -6.774128312585302,1 -6.667399564554367,6 -6.536587191451345,4 -6.992866143687788,4 -4.938750763762286,0 -6.958753458919474,6 -6.639058571981277,4 -6.591566948141841,3 -6.710067406355494,6 -6.641756878936272,4 -4.846915517524277,0 -6.780330559773491,6 -6.675916500094401,4 -6.563646257365943,3 -6.659848163684874,6 -6.366129059287656,3 -6.7981033758475,6 -6.472124455050735,4 -6.9031550011807425,4 -6.774868514364507,4 -6.850761337926462,4 -6.7991037293234236,4 -6.930997914084326,4 -6.772688069581671,4 -6.796037396954617,1 -6.694139203029599,6 -6.732263983723029,4 -6.800001659663365,4 -6.540540434710462,4 -6.868360915121768,4 -6.86103304112987,4 -6.766877108435066,4 -7.012244941780353,6 -6.652144105238907,4 -6.931726591861825,4 -6.631735903167242,4 -6.815519005045269,4 -6.767227046793539,4 -6.918116178026723,4 -6.735137681783495,4 -6.766754441182547,4 
-6.821064368268166,4 -6.697734415791911,5 -4.823285004384703,0 -4.635148187203351,7 -6.5167628087597915,1 -6.654708962097049,5 -6.459693744071442,6 -4.716360959845202,0 -6.59870799070135,2 -7.122237871861725,3 -6.482022375865232,5 -4.792696799280564,0 -6.9028384215843595,3 -6.622071166298342,4 -6.647557778769773,2 -4.827539079910877,0 -6.408018834230605,5 -4.873586593337281,7 -6.799114675492881,3 -6.94859773889915,2 -6.775840617556914,2 -4.650929559986124,7 -6.888086935134809,3 -6.576900916727333,5 -6.369338078558322,2 -6.863469879631604,3 -6.4215878588239494,5 -4.815908277361602,7 -6.78919335450056,4 -6.563879452677828,6 -7.167585554987423,4 -4.76165406615815,7 -4.977640245813002,7 -4.984224444749858,7 -6.980807088489624,3 -4.853467984449011,0 -6.777681759187582,3 -6.6007718041208685,2 -6.232754912802419,1 -6.706525606450281,4 -4.734387237001404,0 -4.873580393385768,0 -6.742246933345588,2 -4.688768434091538,7 -6.509917670001348,1 -4.659969158396473,7 -6.871499021538541,3 -6.631026010797037,2 -4.828262918517976,0 -6.531733537695761,5 -6.855594256526092,4 -6.401390743788791,2 -6.802760293899608,4 -6.528966346151733,2 -6.9588137281402815,2 -6.762834162053043,3 -7.065445038441534,3 -6.788811666306115,4 -6.356765022725453,2 -7.098901819304809,3 -6.596295800413591,4 -6.421233182056267,2 -6.905755653951364,2 -7.1545432629066665,3 -4.967911061119581,7 -6.421238876633122,2 -7.00490927915352,3 -7.189061956773233,3 -6.753106528538649,1 -6.768232784085998,5 -6.8552929284035935,4 -6.292864502155633,2 -7.187014172650353,3 -4.697638948566852,7 -6.80981804087512,5 -6.339188825955448,2 -6.769775371892573,6 -6.9390400433831,2 -4.548940801033496,7 -6.445896774816368,2 -4.675966628082102,7 -6.505039186749438,1 -6.5194940281500084,2 -4.631032580935827,7 -6.472272747757674,1 -6.707111567680889,5 -6.462200084115711,2 -6.859405916040709,3 -6.6803130580784,1 -6.820516375878397,4 -4.94375551256263,0 -6.609449519233942,2 -6.48396509906263,6 -6.67430390336361,2 -6.66274427855732,4 
-7.2628840333251885,4 -6.396187355097056,5 -6.56494380540492,2 -6.782476325696469,4 -4.757921937187006,0 -6.672422512048272,2 -6.923534489030671,3 -6.752708552704308,2 -6.492685613184918,6 -6.900255750512697,2 -6.723538853429134,6 -6.742138434927253,2 -6.415265609226573,1 -4.891964863064094,0 -6.612732046323518,2 -6.710555361844215,6 -6.4548617796856425,2 -7.010861834575822,2 -6.7164152265177774,2 -6.988566977177661,3 -6.404110826804224,5 -6.434090246551272,6 -6.860124580916383,6 -7.075268500079475,4 -6.557123738362252,2 -4.778420333741191,0 -6.558878407227328,6 -6.893733949237588,4 -6.542373408140746,2 -6.764820111434654,2 -7.078348428533176,3 -6.533149770339798,2 -4.775047235585551,0 -6.487570253013024,6 -6.733842443241194,2 -6.834490102492627,2 -6.708530269332377,2 -6.864687470793628,2 -6.840312267893454,2 -6.739205149542183,6 -6.5076516140486085,2 -6.500483745048138,5 -6.5788097184473795,6 -6.798921690449455,3 -6.483746625792875,6 -6.6666025207194455,2 -6.833434077352727,2 -6.766571567617903,2 -6.574868272257214,6 -6.636254017632267,2 -6.7608197038586315,2 -6.35529959627112,1 -6.407441379742723,6 -6.658717048724773,2 -6.81993284111706,2 -6.6132886614667745,6 -6.494193609314183,5 -6.954650572039939,5 -6.746468285761106,3 -6.923249542442416,2 -4.81423122968154,7 -6.332643033136488,2 -6.734012130967666,2 -6.768717160574915,2 -7.09094156916065,2 -6.3398400642558155,1 -4.701260614675717,7 -5.0412405107163485,7 -6.395984300359229,1 -4.876316714428048,7 -6.545411898265267,2 -7.035797295908074,2 -6.926666877437278,2 -6.482906223320097,1 -4.831004053965793,7 -6.312634170632582,2 -6.862501390651541,2 -6.433059036247722,6 -6.5232941143498335,2 -6.452163791563293,5 -6.793339557448164,5 -6.253338005863943,2 -7.093902451937369,2 -6.890757936963002,2 -6.586556900352675,2 -6.651465937562919,2 -6.608873804808014,2 -7.051623625590537,3 -7.217988858252487,3 -6.676806526959112,2 -6.706794351322843,2 -4.812032727388289,7 -6.646794858499694,1 -4.706991447338778,7 
-6.3059423441327525,2 -4.881784556921243,0 -6.780861325785067,2 -6.586893491025596,2 -6.641988780762483,2 -6.621161253488288,4 -6.48100416692443,2 -6.692645387519087,2 -6.767453363161778,2 -4.783431444843133,0 -4.879327369757625,0 -6.480475844747032,6 -6.6142075204833475,2 -6.952125943380504,2 -6.596347717997342,1 -4.857509526440352,7 -6.856207856153847,5 -4.90265082481163,7 -6.457431111423692,2 -6.974024390165155,2 -6.783533329417106,4 -6.561408173586828,1 -4.746922438426196,7 -6.710759687082876,2 -4.651763014567645,7 -6.5890845458451155,2 -6.785959377832711,6 -6.885859154950958,4 -6.604099536764682,2 -6.602713367408438,1 -4.585476566452445,7 -6.839290806153919,4 -6.459639639710905,2 -6.885126865535629,2 -6.868867489977678,2 -6.761800577997985,2 -7.059461788440483,3 -6.630486714958755,1 -4.836215096032388,7 -6.483358628945836,2 -6.643452724008156,2 -6.4170750169790916,1 -6.683912349383634,4 -6.497105455766894,2 -7.082570985190095,2 -6.873647200001074,2 -6.783719194952966,2 -6.636433846848785,2 -6.853966605978643,2 -6.903812839208045,2 -6.7381479224092145,2 -6.89953889067702,2 -6.807741195041877,2 -6.800351618590426,2 -6.945140404445634,2 -6.772511153055815,2 -6.828595666911729,2 -6.678958931383526,2 -6.664435687969451,2 -6.687281510410486,2 -6.8714480427734905,2 -6.909989779354218,3 -6.766627368536957,4 -6.483095440616445,2 -6.9965327633836605,2 -4.750256494981228,7 -6.390221779315762,2 -6.403316609009817,5 -6.466650853684245,2 -6.813965533765609,2 -6.878946366790471,2 -6.769350152249223,2 -6.612047217603581,2 -6.763969046940287,2 -6.8655225647142455,2 -6.874496032282726,2 -6.418827935219188,6 -6.532199123683955,5 -6.570624885342408,2 -6.717865900078263,2 -6.765124112136088,3 -6.625974956910331,2 -6.704411178707384,2 -6.829155035148023,2 -6.800219444717845,2 -6.963587517506126,2 -6.7583310870223725,2 -6.883944339243247,2 -4.772907503350589,0 -6.731609392681313,2 -6.786530127720396,2 -6.835999870083502,2 -7.012508952311648,3 -6.586349527422024,2 
-6.779643366876816,2 -6.752710863033338,4 -6.418795534473724,2 -6.694621214310879,4 -6.411475641062907,2 -6.700688812286194,2 -6.868136607221391,2 -6.769557948453097,2 -6.908727067177682,2 -4.686629878188014,7 -6.513922247053884,2 -6.564937630970087,2 -6.78805015524471,2 -6.781401240930659,2 -6.950073638071977,2 -6.818839585686646,2 -4.827305899962839,0 -6.625725206442119,2 -6.646622070222393,2 -6.870466810229721,2 -6.8948757433700685,2 -6.866622218716586,2 -6.847684360522711,2 -6.89662314038699,2 -7.272723851702416,2 -6.847180150132428,2 -6.630883188234954,2 -6.680487138826483,2 -6.548402428210816,1 -4.761890587728286,7 -6.56999685971989,2 -6.814115025133901,2 -7.075664524757548,2 -6.918658652245819,2 -6.31142200398225,5 -6.594928259008886,1 -4.688039162055762,7 -6.519604681350461,2 -6.725006441523548,2 -6.163111189599671,5 -6.396873046243297,2 -6.917376818442564,2 -6.7811375857116705,2 -6.795062382842575,2 -6.705639174037923,2 -6.833018169023959,2 -6.783308355660662,2 -6.670381685229752,2 -6.811268409520093,2 -6.709129591027025,2 -6.8376608884184815,2 -6.82402511105268,2 -6.985182237763901,2 -6.726436572881194,2 -7.115010654693477,3 -6.7446693431509175,2 -6.951562939265463,2 -6.621312964560535,2 -6.7143411157288675,2 -6.830855930819298,2 -6.649300525313295,2 -6.6930306651681315,1 -4.698618446407919,7 -4.635932410885211,0 -4.647017285341473,7 -6.636591189675829,2 -6.785216157740697,2 -6.823547362954864,2 -6.784595793657796,2 -6.912384525148694,2 -6.768469900660265,2 -6.899381314178442,2 -6.925565670119831,2 -6.811762840913272,2 -6.733789553340575,2 -6.701401985579347,2 -6.925250548737162,2 -6.654645668146807,2 -6.6685727336737814,1 -4.779400531373413,7 -6.627109127277059,2 -6.985863471449841,2 -6.817486530826296,2 -6.836012076080973,2 -6.637037748054982,6 -6.935166394681237,4 diff --git a/openAI_RRM/data_20190419/rrmagent_3112_3.csv b/openAI_RRM/data_20190419/rrmagent_3112_3.csv deleted file mode 100644 index a3d7e98..0000000 --- 
a/openAI_RRM/data_20190419/rrmagent_3112_3.csv +++ /dev/null @@ -1,649 +0,0 @@ -6.887414075721828,2 -4.718094846278722,7 -6.745715373742166,3 -4.757561452404991,0 -6.580142200678387,4 -6.519030446950019,2 -6.606719365968726,4 -6.831207870892592,2 -6.51466787135461,1 -6.67876258597696,1 -6.789291429980543,2 -7.0912923728200346,2 -6.551059674655531,6 -4.826524686892608,7 -6.9017521860488324,2 -6.514206435294882,1 -6.897927722646564,1 -6.55987973853171,3 -7.014662701014349,3 -6.502130353906492,6 -4.60373604273855,0 -6.654293467994039,5 -6.699415409992699,4 -6.844466084521565,4 -6.646575929069802,2 -6.45942718445601,4 -4.772038656673253,7 -6.64857935557543,1 -4.71420187097159,7 -6.423967452673019,4 -6.768124799325541,4 -6.870955558879764,5 -7.035669799797182,5 -6.313959650947179,6 -6.894295849082735,2 -6.762524778844077,3 -6.308210419194784,4 -4.680643893446482,7 -6.698401164014321,3 -6.974325922968385,2 -7.007210618384056,2 -7.169481754150559,2 -6.929177275760532,2 -4.6941802656065015,7 -4.798523080237986,0 -6.410273914520817,6 -6.230026241111211,1 -6.613315769373009,2 -4.689326824151603,0 -6.513566393273335,6 -6.8159433031063275,5 -6.8530040157804395,1 -6.730235059546604,2 -6.668082532903461,1 -6.54314173603967,4 -6.390870926647216,3 -6.885324765577585,3 -6.90850304211806,2 -6.577035839392649,6 -6.148857261474677,1 -6.195032409143339,6 -6.849234090386698,2 -4.863886358024637,0 -6.440542306935673,3 -7.100915002452575,2 -6.540359453399636,3 -6.563906955893377,1 -4.743101535452526,7 -6.902994118623044,2 -6.727867438504598,6 -4.892482183912744,7 -6.805385819832114,6 -6.48410190973385,1 -6.523927358521567,2 -6.553941528314474,4 -6.8309792013697095,2 -6.327743849529155,1 -6.863957220418823,2 -6.4574053784687155,4 -4.644487834632592,7 -6.928971912404324,5 -6.85837411521055,2 -7.28618092505268,2 -4.860219532828274,0 -6.971702766053291,2 -6.750181134223152,5 -4.918390064872268,7 -5.126304398134322,7 -6.701378988629157,2 -4.858943406500164,7 -7.035544168929763,2 
-7.067066983479298,2 -6.5285969242258535,4 -6.580757159024011,6 -4.842042180694304,7 -4.589554056691207,0 -6.921040782598586,2 -6.851013105499092,3 -6.558296270877381,1 -6.430710665044835,6 -7.076190348771562,2 -7.189980402761332,2 -7.077775890354153,2 -6.395484815075892,1 -6.593771967377323,3 -6.991932722122497,2 -7.060475266582605,2 -7.171012785435965,2 -4.933983464191646,0 -6.36878988903209,3 -6.286761549880828,4 -4.784691588113301,0 -6.95544945934303,2 -7.0713313181832165,2 -7.2729547136353245,2 -7.2930797692772105,2 -6.683729317991438,6 -6.832409643564744,5 -4.906196108368731,7 -6.731821652529674,2 -7.199192073778076,2 -4.655510075698499,0 -7.008771945947289,2 -4.785805347590032,7 -6.807180211792257,5 -6.86741783089598,1 -6.701331351656759,2 -6.637339131106578,5 -6.80546159147218,1 -4.745919665710972,7 -6.876547655371041,2 -7.027094813153536,2 -4.756896591452014,7 -6.575504963080581,3 -7.0014611424048185,2 -7.0418840645946545,2 -7.191248556944677,2 -7.02160977465606,2 -6.408702970557842,1 -6.898262747239636,2 -4.581495345151043,7 -6.908506879043952,2 -6.434161119317006,3 -6.947232462576488,2 -4.708960829833484,0 -7.08185820027288,2 -7.161783029333725,2 -7.191734434688489,2 -7.087128160415076,2 -4.762765317378505,0 -4.557241565839825,7 -6.666558866254259,4 -6.6640588558406595,6 -6.759878395519751,4 -6.626491993549795,1 -6.7623172664080045,2 -6.58259795568086,1 -6.804978108415213,2 -7.050914928709833,2 -4.641054885411157,0 -7.17810565746133,2 -7.2008244417404885,2 -6.840347572681952,2 -4.752326258748059,7 -6.6883686915662315,2 -6.6623615055393905,5 -6.459695545755975,6 -4.88868960128648,7 -6.9296522785329575,2 -6.652453553677391,1 -6.617495707793623,3 -6.887491828216331,2 -6.54781275937773,6 -6.401470417800427,1 -6.589735368202199,2 -7.209545406781348,2 -7.1727631328482815,2 -7.151875415633828,2 -6.803297939677086,3 -6.763196211195993,3 -6.401484217353945,4 -6.588074741760973,1 -6.486824422855505,4 -6.7356726519784464,1 -6.808877721452447,1 -6.79555275926726,2 
-6.955482634559968,2 -6.8544088061047495,3 -6.300067201197185,4 -6.3089800913989835,1 -6.710099680718566,2 -6.3874049096477155,1 -6.760708356988134,2 -7.10087681517851,2 -7.235051297126325,2 -7.124104164650502,2 -7.411862412284919,2 -7.029554151884315,2 -7.05042841133991,2 -7.211621281054053,2 -7.0258414478544795,2 -6.686087806509318,4 -6.44524825317387,1 -6.843843215432838,5 -6.873107405657101,1 -6.8915959211554565,2 -4.750748752487059,7 -6.655461383981394,1 -6.737142103085787,2 -7.136727054305774,2 -6.831624841618182,5 -6.34653933889665,3 -6.967350842850236,2 -7.102139897324151,2 -7.07879277822515,2 -7.257984096467356,2 -7.13750134518493,2 -7.012586586686245,2 -4.848102641637577,7 -6.695741122291507,2 -6.527036197764915,4 -6.240584443105967,1 -6.9267568417244005,2 -7.0848750742742785,2 -6.522376132697514,6 -6.306006032286709,1 -6.652265613018879,2 -7.119636205910694,2 -7.344159422231172,2 -7.132749950795036,2 -7.294927924500635,2 -7.2265954716171485,2 -4.754309025762997,0 -6.938898417208357,2 -7.145289147284758,2 -7.034522718323233,2 -6.885001323494666,2 -7.012511403566747,2 -6.516682549608479,4 -6.744503531856503,1 -4.869438342697354,0 -4.54457178167707,7 -6.867376728999767,2 -6.857255678300523,3 -7.03104723694376,2 -4.783039609375335,7 -6.55283075986969,1 -6.788698211543853,2 -6.9644160398801045,2 -6.5329963993211475,4 -6.762819837565891,1 -6.67675829775959,2 -7.149672119789429,2 -7.166402790973037,2 -7.2095002738287,2 -4.744921317781666,7 -6.871047955232303,2 -6.883542161014978,2 -7.145426983242385,2 -6.674902854788758,1 -6.671413059301875,2 -7.137441007206579,2 -7.104653527945069,2 -7.1586702833288225,2 -7.226731928221141,2 -6.404613549126704,4 -6.4287243026805285,1 -6.814283932005976,2 -7.112815367268273,2 -6.995124430932497,2 -7.039095946181671,2 -7.116901448229564,2 -7.0746564658414375,2 -6.85944082977186,2 -6.785826930211253,6 -4.7585535074022935,0 -4.449516066206918,7 -6.695981744924542,3 -7.075410937775038,2 -7.012997808823056,2 -7.030710192771588,2 
-7.276335492370618,2 -6.9411753711461674,2 -7.229854604248237,2 -7.247507146300592,2 -6.744714674149321,6 -6.311458986015041,1 -6.711572225412712,2 -7.034969345311512,2 -7.123690536001078,2 -7.184111400254236,2 -7.136467748818542,2 -7.086570537325417,2 -6.973140423321154,2 -4.768528091354929,7 -6.429090653647949,1 -6.5476688917806,2 -7.229140629557616,2 -6.601749684618846,5 -4.775147451343484,0 -6.941425686739179,2 -7.2639514002462215,2 -6.833188052901727,5 -6.610028266819968,1 -6.92220776713778,2 -6.8364723797842055,2 -6.8871656711361045,2 -7.153710964443773,2 -6.702590816177261,6 -6.296441486450073,1 -6.7814177573376,2 -7.146117305233009,2 -7.050018378015808,2 -6.6571972718864405,3 -6.933916825893602,2 -7.487437857685921,2 -7.205937020518502,2 -7.250600300183149,2 -7.207645994870185,2 -7.019357343671249,2 -6.692901929110092,5 -6.502959357976431,6 -6.300180069651586,1 -6.74340823806821,2 -7.222159722217586,2 -7.0982792565901365,2 -6.979675397039113,2 -7.135653121986886,2 -6.964618525012605,2 -7.1565351968892665,2 -7.132309776258849,2 -7.153447707263973,2 -7.165405336449491,2 -7.098456054373325,2 -6.452088082615827,6 -6.606857042838057,3 -7.034366456663312,2 -7.110459937903316,2 -7.166913030154357,2 -7.083134030021662,2 -7.228830376187485,2 -7.042685242551297,2 -6.998790417684559,2 -7.014252053659015,2 -7.085371144596009,2 -7.170040841556342,2 -7.047653465505704,2 -7.187819352423159,2 -4.7138783092045236,0 -6.883527361595422,2 -7.2116417840271,2 -7.071026345145955,2 -7.185935230284382,2 -6.972745648723549,2 -6.795626667462495,5 -6.668103920988512,1 -6.981959412831372,2 -7.109778174221166,2 -6.911523114830347,2 -7.086308331834429,2 -6.624731098711953,4 -6.558135300071395,1 -6.737065600487769,2 -6.9865672821677665,2 -7.121623230363212,2 -6.647210108314044,4 -6.509722897539493,1 -6.882841989421272,2 -7.191812899309835,2 -7.092283542225987,2 -7.205291086483647,2 -7.225212028848427,2 -6.924731637570205,2 -7.049846155840826,2 -7.111082019439738,2 -7.071725794661011,2 
-7.221904682733002,2 -7.086552441590457,2 -7.064318658066902,2 -7.261929611836799,2 -7.148395570532595,2 -6.58001101905215,1 -6.952663011294758,2 -6.989779059970114,2 -7.156297858101745,2 -7.143762122706454,2 -7.146652290568226,2 -6.3567426354901295,4 -6.705227940705013,2 -7.001567990950858,2 -7.010520335493877,2 -6.9536584032521995,2 -4.857992361773129,7 -6.906054152817012,2 -7.10422946039795,2 -6.997740910221751,2 -6.5962035140541575,6 -6.529547269864857,1 -6.861066210386174,2 -7.135333876072216,2 -6.9577005211402785,2 -6.560431096630726,3 -6.783843385856799,2 -6.512911377233959,4 -6.541127113210209,2 -6.998871896281996,2 -6.33018844402624,1 -6.852180334621885,2 -7.172757833096405,2 -7.271656412612685,2 -7.182653047407584,2 -7.068339331753437,2 -7.119377698094983,2 -7.060226415307908,2 -7.064161604936249,2 -7.174046688032874,2 -6.911041779570373,2 -4.706855327698724,7 -6.902371952593784,2 -7.1739124981341895,2 -4.8759044367369855,0 -6.904076226094892,2 -6.89051914472916,2 -7.0012621119824034,2 -6.51053883717375,1 -6.826065130669118,2 -6.513920420344528,1 -6.9231677046411315,2 -7.125494844972464,2 -7.169116483830683,2 -6.527833355483084,6 -6.832403286938247,2 -7.251195421985401,2 -7.211289303278462,2 -6.829431895738969,2 -7.057265538395093,2 -7.0778022991581695,2 -7.174677987979819,2 -7.08773268438678,2 -7.008074278448999,2 -7.160141068056018,2 -7.079424937157954,2 -7.017435778298737,2 -7.19429781071124,2 -6.792218787058258,2 -7.123032376723284,2 -7.014074174796017,2 -6.9956849187503005,2 -6.4356397913456345,1 -6.769603086555082,2 -7.081156749730198,2 -6.879831526457943,2 -7.0791632164523595,2 -7.007860130893554,2 -6.816001437884218,2 -7.23623371951579,2 -6.974629442979263,2 -6.973942472510182,2 -7.062825608877885,2 -4.682139883647961,0 -6.788974327610149,2 -6.880302641534022,2 -7.090699070855067,2 -7.096827166494079,2 -7.1047363337881855,2 -7.193828279733114,2 -7.13960758937641,2 -6.847858110743724,2 -7.08245037911488,2 -7.058716105600585,2 -7.022285441101827,2 
-6.9967933416183135,2 -6.674473982795243,3 -6.92275015527668,2 -7.0641844134202865,2 -6.977197476849362,2 -7.110859648621506,2 -7.068556290240374,2 -7.012571703384504,2 -7.054728587000559,2 -6.917926384133066,2 -7.014963523796807,2 -7.321234164433157,2 -7.167252492185729,2 -6.4156639721861355,1 -6.824639500503156,2 -7.266777501358706,2 -6.863087634620158,2 -7.0116303083174865,2 -4.917523463739941,0 -6.876802314216125,2 -7.351225056688073,2 -7.515833917020921,2 -7.097786484934793,2 -6.993336754637654,2 -7.0127723068664265,2 -7.135369157973508,2 -7.042094324700373,2 -6.887045801961919,2 -7.110962565526885,2 -6.979036225463545,2 -7.094723017804269,2 -7.157221512820721,2 -6.974431715662656,2 -6.443214487537616,3 -6.95112343302025,2 -7.35741941738186,2 -6.897533866648619,2 -6.981641844172393,2 -6.861378191753909,2 -7.096689840960232,2 -6.626306456526658,3 -6.84036176519341,2 -7.002164684105142,2 -7.023943295328678,2 -7.177482230528363,2 -6.868978064844113,2 -6.897396262881111,2 -7.231673727188659,2 -7.010476194243184,2 -7.097899033827731,2 -6.854420846718951,2 -7.059851741412765,2 -7.207930161005869,2 -7.077231577443846,2 -7.010776581603543,2 -6.5007482067674225,3 -7.086983717963149,2 -6.906241577418293,2 -7.181472349202002,2 -7.216338941639041,2 -7.0408553437840125,2 -7.0228470472970095,2 -6.820820097403597,2 -7.104966151194603,2 -7.0896842121902885,2 -7.1166877781552085,2 -7.097171988761735,2 -7.049877617745723,2 -7.062109766798581,2 -7.057720256636125,2 -6.957797762511305,2 -7.2622534825267575,2 -6.86222943440757,2 -6.943569443952972,2 -7.084878014586918,2 -6.850975768546169,2 -7.200819837050831,2 -6.996716236183808,2 -7.078408529625916,2 -7.244771270725372,2 -7.107736082974409,2 -7.274714987083584,2 -7.136973165545357,2 -6.603548469687574,1 -7.055498014524659,5 -6.789053727978237,2 -7.0586944875882836,2 -6.85064405416892,2 -7.206814835189716,2 -6.978608139597854,2 -7.226254050639136,2 -7.118182949181383,2 -7.0983041006882175,2 -7.085803373697984,2 
-7.165458464281373,2 -7.148349586553211,2 -7.161628727324905,2 -7.178358433974093,2 -6.988503297405536,2 -7.079338759677325,2 -7.142110376384156,2 -7.005709544909307,2 -7.071497358779302,2 -7.2224903095786095,2 -6.960133136711547,2 -7.256393508006335,2 -7.1326919106058515,2 -4.923022742970568,0 -6.876454442158703,2 -7.1480359548070584,2 -7.2141627317983135,2 -6.897990424703015,2 -7.076121453421546,2 -6.98211539729399,2 -7.001768770634957,2 -7.2359458455695345,2 -7.304864500158633,2 -6.969735230968,2 -7.088957853931972,2 -7.248387701454592,2 -7.126834625006146,2 -7.08992949317426,2 -7.116092354473977,2 -7.205832621218591,2 -7.067802646176762,2 -7.097805945053139,2 -6.828896722383232,2 -7.109948673222116,2 -7.189584087050889,2 -7.241571803461217,2 -7.032439054029599,2 -7.147023361277949,2 -6.870970418266907,2 -7.226900249876181,2 -6.913065107547596,2 -6.9951054132625154,2 -7.109313050947658,2 -6.998822008971463,2 -6.9854538243012465,2 -7.046426332056661,2 -7.291565998460805,2 -6.811100902166013,2 -7.008431224790929,2 -7.093190605577955,2 -7.045317879433544,2 -7.159247779879753,2 -6.799417190751053,2 -6.971244415269992,2 -7.171617878570086,2 -7.170062259126816,2 -6.995099912184919,2 -7.093345865065735,2 -7.111165030141072,2 -7.247631117834421,2 -7.079055958869053,2 -7.215430628394277,2 -7.214125378164659,2 -6.988167698532716,2 -7.170444062581934,2 -7.098947817399639,2 -7.243233925001257,2 -7.029930427405382,2 -7.180492203367446,2 -7.2115652681525315,2 -7.199516088117404,2 -7.149843501929368,2 -7.225551840974223,2 -6.830932997963846,2 -7.173364018235571,2 -7.12555224859319,2 -6.940204221896011,2 -7.030163993619739,2 -7.04151347995633,2 -7.18284390348669,2 -7.234418831513361,2 -4.741235987742794,0 -6.690727247292909,2 -6.987032740160663,2 diff --git a/openAI_RRM/data_20190419/rrmagent_3222_1.csv b/openAI_RRM/data_20190419/rrmagent_3222_1.csv deleted file mode 100644 index 91112f6..0000000 --- a/openAI_RRM/data_20190419/rrmagent_3222_1.csv +++ /dev/null @@ -1,410 +0,0 @@ 
-9.812701874373218,3 -8.491455891586568,1 -8.552735099464131,1 -10.480649320172567,3 -8.353748795705739,1 -6.713028227923214,7 -6.804499909640788,0 -10.534349996101161,4 -10.058300640839603,3 -10.822875600023096,3 -7.9171903705650255,5 -8.63508371836414,5 -8.3961718089497,1 -10.407205257069606,3 -8.436113213311248,2 -8.71941977133064,2 -8.370296301279309,2 -8.147990830291437,1 -6.630126368940135,7 -8.352385203896981,5 -10.140343417887541,3 -8.273216494003172,5 -8.32597695807032,6 -10.566939197551829,4 -9.873022866863991,3 -8.401251922058343,6 -8.090871352727527,1 -10.180787514809582,3 -8.143780434902322,2 -8.458654751644254,6 -6.841444037687499,0 -8.070431644614857,6 -8.094247136421911,1 -10.473070383720472,3 -7.063550298237454,7 -6.5748915640044245,0 -10.13185971497084,3 -6.894477952687428,7 -10.189379882407454,4 -8.267066670476751,1 -8.54376819029258,1 -10.441426946156616,3 -10.629030259779706,3 -8.520580716189295,1 -8.270433660347884,5 -6.5737285550335285,0 -10.399260064347482,4 -6.832878535810282,7 -7.175321025668087,7 -8.3006118372142,6 -8.50996559136298,2 -8.473300498139965,6 -7.925314486912709,1 -7.707609953305794,6 -6.607454025576688,0 -10.046695922410825,3 -8.11017718063051,6 -6.930185361617012,7 -7.035241479495859,7 -10.63368026099255,3 -10.661188661074862,3 -6.645326723198645,0 -10.122629930296938,3 -8.213103587243419,5 -10.601620178071082,4 -8.270869100052874,2 -8.09952851682838,6 -6.73226653634534,0 -9.992368142142649,3 -8.293966967008387,1 -10.491109159575645,3 -10.422720595857514,3 -8.172186072473867,6 -7.983178851762265,5 -10.405710418039696,4 -8.139248364573673,2 -6.612588299887981,7 -8.354084586890494,5 -10.470226734520988,4 -8.082329977811685,1 -6.793634122863746,7 -10.379388355572543,3 -10.404260361077162,3 -8.40122041576632,1 -10.232727412727126,3 -10.716893637695778,3 -8.26858348921157,2 -8.614508922859896,2 -10.42862818703155,3 -8.61851845453594,2 -10.47422562769693,3 -8.176612516671609,6 -8.083117814079406,1 -8.487562996678355,5 
-10.678794628969264,4 -8.208322633043222,1 -10.36929075097516,3 -10.802278496211496,3 -6.925025890274648,7 -10.383218150649821,3 -8.419668207007518,1 -8.614639861371119,1 -8.515515051033775,1 -6.826114474996304,0 -8.456395220452896,2 -10.377957932565385,3 -8.559999667610386,1 -8.038153939819617,6 -7.943851607502202,1 -10.450706061357417,4 -6.555885181109118,7 -10.388919409154425,3 -6.850897263969735,7 -10.324309930242991,3 -10.674415440918478,3 -10.555221340355223,3 -10.496971956161351,3 -6.859738763894373,0 -6.595300970501708,7 -10.401974561479532,3 -9.88508540164419,4 -7.991846214223993,1 -10.424255132486753,3 -10.560130013445312,3 -7.0494083411891015,7 -10.091908409113627,4 -8.242666310087218,1 -10.216820003590433,3 -8.293862437008212,1 -8.680742332473965,1 -10.565012975376812,3 -10.729047300547636,3 -10.774728451473253,3 -8.01183033625074,6 -8.12416095719434,1 -8.722390416078998,1 -8.388866670620052,5 -6.915604440502616,7 -8.28430896211517,6 -7.984530679854959,1 -8.42004052433898,5 -8.459714253582497,5 -7.391201056736812,7 -10.222913231810201,3 -10.67948772308116,3 -10.693990434444867,3 -10.462514644794803,3 -10.705410740156347,3 -10.701641569150567,3 -8.290150239528934,2 -10.581536980034361,3 -8.504384662250185,1 -10.525265872916329,3 -10.510327794202112,3 -8.572781683212142,1 -8.113515410606592,6 -8.001731951095948,1 -6.893966888715342,0 -6.6743275243755775,7 -6.695838671996895,0 -10.255220458922572,3 -10.583666198449668,3 -6.879734044133519,0 -10.102698525438592,3 -6.69705076655632,0 -10.117571628821503,3 -8.14167048381494,5 -10.295211651827099,3 -10.654029180865681,3 -10.77065788928542,3 -10.690682793224322,3 -9.878706958353876,4 -8.297585886740853,1 -8.570384805914466,1 -10.441559407320524,3 -10.714132568283262,3 -8.204023712258573,1 -6.739683701312867,7 -8.406192387126527,5 -10.254056331330977,3 -10.41653156355472,3 -10.360023111919716,3 -10.684871346619333,3 -10.50854981592191,3 -10.859092906626884,3 -10.433712841681743,3 -10.54630573288137,3 
-6.664241809342063,0 -8.061874592096977,5 -10.219106403033388,3 -10.666443426992437,3 -10.526119060990172,3 -10.693862616246916,3 -6.74346258825814,7 -6.683921669422815,0 -8.27360700410578,2 -10.406009175070611,3 -10.532626395986274,3 -10.535234768223978,3 -10.556692814193914,3 -9.823833027682646,4 -8.20759446595856,1 -10.54525936983597,3 -10.640774493468534,3 -10.716544215316713,3 -8.246078711104099,5 -10.180935292720422,3 -10.761046934108089,3 -10.409252852577334,3 -10.574424773663523,3 -10.238918267110305,3 -10.654794649137507,3 -10.563045147068518,3 -7.074716826647925,7 -10.314161162618067,3 -10.678025350044567,3 -10.760028263331359,3 -10.701908105341763,3 -8.074033504959882,6 -8.146765147522075,1 -10.360311053452575,3 -10.86639640874286,3 -10.821817106286277,3 -10.684215451412012,3 -10.525650586565378,3 -10.477642206289628,3 -10.399167782626385,3 -10.782317322860878,3 -8.231248772125598,1 -10.503103512076995,3 -7.020854977739524,7 -10.53648554124922,3 -10.665571381027666,3 -6.800087598825165,0 -10.209039907849863,3 -10.620009615905662,3 -10.820212546652904,3 -10.661710378011083,3 -10.598900921137384,3 -8.305465603796193,2 -10.32503815236893,3 -10.784097226371864,3 -10.429315726050572,3 -10.860116967366395,3 -10.79121992370331,3 -10.65945631556831,3 -10.559855370558722,3 -10.793016933732831,3 -6.843900953145404,0 -10.661961839571918,4 -10.683109153982791,4 -8.244566352439437,2 -10.371267166634231,3 -6.611447422576799,0 -10.130366362805857,3 -10.82054146344202,3 -10.636119701134051,3 -8.102889335627506,6 -8.086529097641343,1 -10.401652753587982,3 -10.581577416309893,3 -10.583143349577139,3 -6.704469529380542,0 -10.32329367625126,3 -10.774268853695371,3 -10.805329288130471,3 -8.122697278104967,6 -8.147396020004246,1 -10.361793810870536,3 -8.334890131861648,2 -10.404030572484361,3 -10.563442907679729,3 -10.737500087500873,3 -10.944184972280238,3 -10.826118172618504,3 -10.811499142982013,3 -10.45387399448265,3 -10.796078331441976,3 -8.24383813213128,6 
-8.070625893703273,1 -10.41876359881867,3 -10.744096638482775,3 -10.460191049357002,3 -10.613465463759434,3 -10.71437586331135,3 -10.794853591738415,3 -8.45704063854754,2 -10.305951387942565,3 -6.726406422877,0 -8.444458175085828,1 -10.32472728327333,3 -6.844659317293802,0 -10.260745630033036,3 -10.865728023255887,3 -10.735435346014684,3 -6.854949131223283,7 -10.270628494957597,4 -8.240667372602779,1 -10.228753996246455,3 -10.6185979111892,3 -8.523447560454922,2 -8.396439434086604,6 -8.107804648422546,1 -10.501143322829627,3 -10.77085621905009,3 -10.518435870187831,3 -10.538781631912705,3 -10.728776063775552,3 -10.504603046065572,3 -10.870111064520835,3 -10.397706290884162,3 -10.64507931543049,3 -10.732523113306309,3 -10.852851765245909,3 -10.49472780829358,3 -10.874041423518108,3 -10.68838630891914,3 -10.663640148159308,3 -10.692974755691875,3 -10.636026267027566,3 -10.829006335476635,3 -10.533321562996678,3 -10.871124248604767,3 -10.769978607008918,3 -10.62481194989674,3 -10.504407227967636,3 -10.774172163537056,3 -10.623982188126941,3 -10.680470777581942,3 -10.868514055449024,3 -10.692578127467433,3 -10.662687571214384,3 -6.870025979783712,7 -8.460828807919825,2 -10.283668939249525,3 -10.42957251656412,3 -10.479326684107553,3 -10.82225151154034,3 -10.978286499484458,3 -10.250494305227312,3 -10.716519561011538,3 -10.436385334623536,3 -10.950988058274676,3 -10.7349656378842,3 -10.700080817449606,3 -10.614119382026644,3 -10.781345707331845,3 -10.506442721749641,3 -10.729049255060614,3 -10.68260534899274,3 -10.638141181317854,3 -10.611250653799207,3 -10.63950529583829,3 -10.710778503185624,3 -10.522868648257338,3 -10.365841250212066,3 -10.616292770658797,3 -10.678275161818208,3 -10.620280884016653,3 -10.78373277977204,3 -10.556204835685458,3 -10.563072429180442,3 -10.563865658885257,3 -10.41314458150014,3 -10.562002991571605,3 -10.83711962167629,3 -10.61460256587744,3 -10.276113872122211,3 -10.49668934300221,3 -10.376928871949707,3 -10.730024033860438,3 
-10.73294748867524,3 -10.841449903382742,3 -10.508119745008633,3 -10.930783493712758,3 -10.551538245522213,3 -10.604884282156522,3 -10.537186589694347,3 -10.491944696853484,3 -10.636403006501219,3 -10.85891261143617,3 -10.547434032242563,3 -10.880271006072336,3 -10.57192601999418,3 -8.35904841734335,1 -10.419178710140704,3 -10.827774039939106,3 -10.767254690438127,3 -10.511086585582015,3 -10.643271696597566,3 -10.963759650587118,3 -10.526410773747479,3 -10.85761047841791,3 -10.341939876850887,3 -10.659125684960356,3 -10.553251677815094,3 -10.626926152055082,3 -10.617790323825753,3 -10.520468119191543,3 -10.604419650003935,3 -10.577072164852877,3 -10.620389333640718,3 -10.518178627368242,3 -10.751185357949021,3 -10.52210937461138,3 -10.795065220439556,3 -10.480946367937982,3 -10.504561963181349,3 -10.752860095835631,3 -6.963297769168678,7 diff --git a/openAI_RRM/data_20190419/rrmagent_3222_2.csv b/openAI_RRM/data_20190419/rrmagent_3222_2.csv deleted file mode 100644 index ca7e49e..0000000 --- a/openAI_RRM/data_20190419/rrmagent_3222_2.csv +++ /dev/null @@ -1,361 +0,0 @@ -7.921791175358099,1 -10.304165827910355,3 -8.39562033297661,5 -6.806042332939603,7 -8.471994622784736,6 -7.940285619275252,1 -8.133778014799539,2 -6.889915414023765,0 -7.274105856323,0 -8.231520140996873,2 -10.4187076735278,3 -6.83541021221711,0 -10.013617219870214,3 -8.35369843076278,5 -10.321704879310147,3 -8.297310819692164,1 -6.869537743102318,7 -8.361867359116928,5 -9.96891807780947,3 -7.890116991649993,5 -7.984244664377951,2 -6.82383120371731,0 -10.50248746833308,3 -10.14725185055538,4 -8.16996437889015,5 -8.049759219401663,6 -10.562454083293915,4 -8.149860037348647,2 -8.318715738475897,6 -6.715315258331027,0 -10.102063676649028,3 -6.86452634385401,0 -8.185814487961585,5 -10.341749941388287,4 -8.28815570600751,2 -10.656719761300584,3 -8.467878846657632,2 -8.493943022679662,2 -8.586843267649748,2 -10.089780073455353,4 -6.631916892747119,7 -6.628778047542292,0 -6.6391491764520305,7 
-8.421736869407379,6 -10.388297341894912,4 -8.281438939794752,5 -8.22079473770334,1 -8.48416886392121,1 -6.562555758830719,7 -10.226378258249122,4 -10.575297996209676,4 -10.757346723531395,4 -6.9179427464867915,0 -8.131432506340552,5 -10.052682405727825,4 -11.04509087801626,4 -6.580319249925055,7 -7.0402276957925025,7 -6.935978743098107,7 -8.504593371791444,6 -10.393292164262292,4 -8.375479506102721,6 -10.492133166839785,4 -6.880569001403588,0 -10.385219356498776,4 -10.656737420291428,4 -8.276383063481218,5 -10.334060298685214,4 -6.840412843615737,7 -10.110501976430363,4 -9.970711329337453,3 -6.977856603071969,7 -10.113319345109645,4 -7.127824038542501,0 -10.370713905697333,4 -6.743213850057416,7 -10.127264198892442,4 -9.905100869546223,3 -10.759669787547896,3 -10.017106949205742,4 -6.7231491117485325,7 -8.227841474311742,5 -8.06372911343728,2 -8.203767013998842,1 -8.523994765384924,5 -8.322431201366088,1 -6.84001030060437,0 -10.351519803946857,4 -6.760750392526594,7 -7.102422768900895,7 -8.444606012265071,1 -8.23472709538929,2 -8.736483487497882,2 -10.25730627103109,4 -8.306487361039348,6 -8.263451325453314,5 -8.058435980008623,6 -7.979551563119421,1 -9.976218041734416,4 -8.361821958518465,6 -8.433869774008674,2 -10.316676055887003,4 -8.210969768560897,1 -9.945394600284063,4 -10.81358081878722,4 -10.537495590326598,4 -8.360146577889655,1 -10.166979730165071,4 -10.623345182344577,4 -10.699204683423511,4 -8.211815333013147,1 -6.876819539601698,0 -7.139445782945478,0 -8.402242134200392,5 -10.582736506063146,4 -10.015358018167365,3 -10.610122136717237,3 -10.753741549152885,3 -9.968078851770091,4 -6.740074038445991,7 -10.108440685310063,4 -10.780359006903433,4 -10.617449938221938,4 -10.522386488250222,4 -10.695646717588174,4 -10.795995050486903,4 -10.448976432164626,4 -8.347600847902761,6 -8.128830329780243,5 -10.390549593549258,4 -10.815511629288258,4 -10.901188917956734,4 -8.214591790511738,2 -10.276998679774575,4 -10.542101993602142,4 -10.567444895185783,4 
-8.449601203457963,5 -8.571402432138065,5 -10.45415200914226,4 -6.700146251589285,7 -10.29370856373758,4 -8.207921083246145,5 -8.040150234780045,2 -10.192465525047862,4 -10.788275442731361,4 -6.972900124552566,0 -6.823230133334615,0 -10.49399709843417,4 -10.831871843741919,4 -10.834310514906711,4 -9.764358471933512,3 -8.479919428613059,1 -10.128596031752705,4 -8.44299468454254,6 -8.576121455796107,6 -10.343086806953348,4 -8.531085210119373,5 -6.903726415068606,7 -10.123116453305386,4 -10.711156141581819,4 -6.70898418443759,7 -10.318681262785676,4 -6.98026955160909,0 -8.133368117187675,5 -10.560032026068145,4 -8.356076427309556,6 -6.746595189300984,0 -10.206963118131528,3 -8.111193241670362,6 -10.579087579228204,4 -10.759759859684744,4 -8.093263093875226,2 -10.134693361622169,4 -10.673186301419506,4 -10.692128162916594,4 -10.740112862079208,4 -10.651749624113455,4 -10.533908583698272,4 -6.97326238919746,0 -10.5869175131808,4 -10.496245576851596,4 -8.205028170139972,2 -6.913810161308469,0 -10.327806685778576,4 -10.540033263859268,4 -8.57242110963552,5 -8.28230146341211,1 -10.414651367465117,4 -6.6739431970834735,7 -10.41740922082782,4 -8.378834984453068,2 -10.249831112866003,4 -8.29621120758886,6 -10.6600155398745,4 -8.461750142253807,6 -10.60099659816588,4 -10.598566405854577,4 -10.81826016109528,4 -9.962074616366316,3 -10.812000982150119,3 -10.202675587905514,4 -9.916660289226453,3 -10.027077782738962,4 -10.701382470657862,4 -9.670453926898375,3 -10.191578665798861,4 -8.283912315791426,5 -10.323489091229396,4 -10.558941702818505,4 -10.754114204468536,4 -10.595555276625278,4 -8.41999096119526,5 -8.015367020106583,2 -8.34481970518646,1 -10.341513269692436,4 -10.614200145294024,4 -10.812670525914497,4 -10.646479619991908,4 -8.445063981506536,5 -10.473666276064085,4 -10.406700548334626,4 -10.676856394753306,4 -10.421578608307733,4 -10.600105198633539,4 -10.627709526754309,4 -10.657621266698227,4 -6.590637751788756,7 -10.002362010167507,4 -10.469636681752474,4 
-10.670811717225106,4 -10.583982223131246,4 -10.526292704286968,4 -10.685928580924447,4 -8.087776219234772,1 -10.198310397777622,4 -10.366905842792628,4 -8.472965800844936,5 -6.76594646791972,0 -10.465876267383202,4 -8.421340802595633,5 -10.381768165316752,4 -10.869488048909334,4 -10.820496142689413,4 -10.424169334622317,4 -6.606043494574032,7 -10.2297323903012,4 -10.684746816615844,4 -10.589684148298065,4 -10.423482439778809,4 -6.721666779326603,7 -10.417400671012496,3 -9.900028265339596,4 -10.62901517799361,4 -10.691163935588,4 -10.62076235181315,4 -10.457922092951087,4 -10.810615624249783,4 -10.516023428208205,4 -10.654152652253766,4 -10.563232835324534,4 -10.547239558138545,4 -8.088488869498496,2 -10.29059088744203,4 -10.52970272492916,4 -10.781346475403483,4 -10.5601864582996,4 -10.067886084132441,3 -10.082710193557311,4 -10.338081062450096,4 -10.503185800293618,4 -10.887121436525039,4 -8.4704465968209,2 -8.593868506985146,2 -10.152533479480292,4 -10.590368627189283,4 -10.606226552663204,4 -10.643616226095153,4 -10.570882939900585,4 -10.504221102345607,4 -10.662236161332334,4 -10.482982643098742,4 -8.326260336616595,2 -8.357060347079871,6 -10.398060421665367,4 -10.859239149360222,4 -9.872996262715676,3 -10.211238150212333,4 -10.501322009659319,4 -10.700849348386145,4 -10.61395384128937,4 -10.522138490419811,4 -10.71448302194948,4 -10.687240273817906,4 -10.794757044202655,4 -10.496350562403284,4 -10.621792500770624,4 -10.775945709142945,4 -10.871918877180955,4 -10.686900829095784,4 -10.783380775391203,4 -10.540709219758325,4 -10.435221787445526,4 -10.564393760184002,4 -10.44663192954248,4 -10.73417124035175,4 -10.589426130450011,4 -6.7831536390560085,7 -10.336489975670693,4 -10.742878337178318,4 -10.654721653970851,4 -10.573116435386305,4 -10.845146343710452,4 -10.33375638921193,4 -10.691169422886043,4 -6.905838895593954,0 -10.30873698143784,4 -10.6542312281361,4 -10.415658499107002,4 -10.402172229152173,4 -10.735553674155028,4 -10.663612560776357,4 
-10.916200776907061,4 -10.399687627160182,4 -10.5856126680844,4 -8.414370476917933,6 -10.55725550739967,4 -10.429224861310125,4 -10.53214248524994,4 -10.513260561260884,4 -10.821590952833493,4 -8.361890749256249,6 -10.235695519676693,4 -10.70517818494561,4 -10.723088206772088,4 -10.73099161965368,4 -8.270458279444886,1 -10.16812904751264,4 -10.659481212184167,4 -10.49609247782513,4 -10.532693785763605,4 -10.574179785359448,4 -10.70827522242708,4 -10.596201771767007,4 -10.38444381697015,4 -10.594763352796685,4 -8.21906778296719,2 -10.370504095140278,4 -10.52989006153881,4 -10.438146568279988,4 -10.479097941147533,4 -10.523532564912877,4 -10.63717520444291,4 -10.72329615880591,4 -10.829484644803717,4 -10.612654118076511,4 -10.643477964938324,4 -10.734817400392043,4 -8.440177242208039,5 -10.208335404272496,4 -10.710993346220818,4 -10.484255874289232,4 diff --git a/openAI_RRM/data_20190419/thomagent_3112.csv b/openAI_RRM/data_20190419/thomagent_3112.csv deleted file mode 100644 index b74fab6..0000000 --- a/openAI_RRM/data_20190419/thomagent_3112.csv +++ /dev/null @@ -1,630 +0,0 @@ -4726.583770948741,0 -6506.01903752525,4 -6819.732848167426,5 -4665.3073948909505,0 -6520.880148479449,3 -7090.144009559101,1 -6815.772171355295,3 -6587.935526680192,5 -6790.330656799089,6 -6510.460427625048,2 -4635.488560796568,7 -6418.3345936832175,2 -4857.369464054169,0 -6902.88710268121,6 -6578.347668208352,3 -6154.052959977751,4 -6236.622211158789,3 -4875.5351192833505,7 -6292.765368571195,4 -4661.228353301766,7 -6692.739126142189,1 -6450.466157309237,4 -6574.539527291648,1 -6573.263200758867,2 -6855.893623854546,6 -4747.836474940563,7 -7091.686481823985,1 -6642.188664534659,3 -6550.501914945401,2 -4730.2447125279405,7 -6558.402532414399,4 -6480.008909957718,3 -6999.899940196131,1 -6512.001748980701,2 -6932.01451630692,2 -6768.602089446488,2 -6724.185296935027,2 -6836.766090677114,3 -6840.917247394207,1 -6298.148697050608,4 -6686.172673655941,5 -4973.761784622589,7 -6745.003458801378,3 
-6756.387784467746,6 -6840.273791681527,4 -6869.706795208637,1 -6814.6400018967815,5 -6694.4897056840655,3 -6313.147217789797,4 -7007.673611377591,4 -6270.739842384584,2 -7173.100207327832,6 -4767.7758461881795,7 -6569.587077219348,5 -6817.091171801629,5 -6952.70162785863,1 -7133.711986076643,1 -6882.550248776022,5 -7033.158577774701,1 -6683.759518144136,5 -6482.568711995341,3 -6846.680004599786,6 -6510.620108985187,5 -6752.315554748924,6 -6547.9202963038315,5 -6786.909600688153,1 -4967.443490404249,0 -6641.204407536967,5 -6569.364773256428,3 -6918.8563454273635,3 -6791.360449667722,6 -4825.932424295655,7 -6414.66691982778,4 -6907.699762286447,4 -6773.067856712133,1 -7006.695288715602,1 -6666.775214420044,3 -6803.943435505373,6 -6253.858701431852,5 -6901.139944794222,6 -6674.149194679771,1 -6540.14414467443,3 -4712.025716649294,0 -4917.987912785473,0 -6702.993794845398,2 -4723.844042152061,7 -6854.835933197556,1 -6344.577861985344,2 -6886.4459493475215,6 -7287.0347977983565,6 -7448.104510205641,6 -6602.066986432192,4 -4712.104049868485,7 -6838.877423096601,6 -6574.549786166743,5 -6534.507667739161,3 -6849.274855277696,3 -6383.659600042243,4 -6699.718614777707,1 -6480.476444911588,2 -6736.785344704496,3 -6498.968024812076,4 -6509.353878303233,2 -4646.874313512967,0 -6686.049410823534,4 -7027.767673724881,6 -6848.824455906077,4 -6666.278650371969,2 -6625.850670529142,4 -6879.471731581919,6 -6977.7375913646365,6 -7100.626545938007,6 -6430.402305899699,3 -6460.255107474174,5 -4724.617755010336,0 -6827.3833596170525,1 -4927.894554233038,0 -6772.211428774723,2 -6912.385705832085,6 -6851.310303986916,2 -6253.915974415101,5 -6617.762918876424,4 -6488.526044766867,2 -6169.6472063987185,5 -7213.218357924382,1 -6867.748815644327,5 -6918.33158685993,5 -6791.462025221129,4 -7012.330212850029,6 -6532.385092284261,2 -6341.0649869652525,4 -7071.67625594787,6 -6555.754416331552,1 -6638.048758592245,3 -6720.160451729105,3 -6523.617552923813,5 -6896.766465778542,1 
-6469.2223975548095,2 -6795.408503132324,2 -6711.30815213748,3 -6809.734262786927,6 -6488.112791956408,5 -6421.927511143858,2 -6670.267882538749,2 -6353.812272340946,5 -6773.066482558616,6 -6950.169798355646,4 -6322.0438894388335,2 -6740.778667992463,3 -7181.179908813653,1 -6707.744961398099,2 -6670.087823798758,3 -6725.416217740019,2 -7118.079107847797,6 -6596.578183970384,2 -6195.201143486743,5 -6556.333495017416,3 -7162.009166951452,1 -6718.796469049854,4 -7055.791369991619,6 -6700.27576102535,2 -6857.148639439893,1 -6762.858186744903,3 -6341.054477667647,4 -6422.040720466387,3 -6911.039009158492,1 -4761.117914525239,0 -6498.605949279907,3 -6496.963753915143,5 -6939.728445397315,6 -6307.254286934067,3 -6686.648720419727,6 -4790.016182862088,7 -6810.102614714142,6 -7013.384287467183,6 -6383.264450367278,3 -6477.906699276913,4 -6906.469779413795,1 -6461.519370159827,4 -6429.494649467426,2 -6965.61635226687,6 -6767.8597870046215,2 -6504.313892619573,5 -4842.288701265012,7 -6990.837731334252,6 -6581.799665800612,1 -6279.792895306004,2 -7070.817723115797,6 -6625.534843455987,2 -6783.069134751566,6 -7178.867783787074,6 -7222.1686983174495,6 -6768.866952938424,1 -6605.526289924022,3 -7194.593908814582,1 -7084.3281595078115,1 -6944.5712452997,1 -6617.962270096844,6 -6562.159508352914,1 -7332.316747942276,1 -6684.483719660237,3 -6687.299786942702,3 -6528.3639626502145,5 -6893.661817900852,6 -6878.030619170293,4 -6766.723397207286,5 -7111.555638704797,1 -6452.388827271816,4 -6895.939163147777,1 -6689.492940515687,6 -7272.507055475389,6 -7003.533628983656,6 -7027.822589263015,6 -6239.670182664895,3 -6849.9246998682875,6 -6509.435910980421,3 -7215.792345467358,1 -6729.240612503179,6 -6949.756042803154,2 -6941.806139477213,2 -6725.298763051771,1 -6501.4793962907315,6 -6841.195098810346,4 -6667.875241127266,1 -6637.763064165907,6 -4781.890038349574,7 -6727.656845343796,6 -6698.1404391099295,1 -6327.117157844632,4 -6397.364553780272,2 -6391.289486291149,5 -6929.046221830446,6 
-6668.629600696243,2 -6659.772302287882,1 -7024.557120878943,1 -6542.431040703648,6 -6662.197336385083,2 -6860.217378735422,6 -7165.897028085659,6 -6421.0144446801405,3 -6715.302411932873,6 -7179.501657489152,6 -7107.341826671578,6 -7133.999263130792,6 -6822.202656066354,4 -6658.311026989245,1 -6679.171766188013,6 -6432.72186618061,3 -7066.167176633809,1 -6593.807674822686,6 -6528.963805530141,5 -6419.536372999682,2 -6775.571847957824,1 -6415.095021942045,2 -6543.103974523736,4 -6684.675059779556,4 -6518.2637343481365,5 -6902.615327602836,5 -4806.297150338174,7 -6882.032903261584,6 -7197.138200766443,6 -6895.411922079926,6 -4848.798493163935,0 -6594.780974361671,5 -6990.669944398589,1 -6707.247231013151,6 -6798.947530915439,1 -6593.648164088919,4 -6450.4936097171585,2 -7028.673331827835,6 -6502.474695592332,5 -6908.648349099276,5 -6776.715614595779,6 -6754.416114628619,4 -6352.549111223198,3 -6793.424792883569,3 -6847.2128819061545,1 -6850.115980357173,5 -6728.887055493863,1 -6512.324304082998,2 -6921.897375256663,6 -6574.983612854163,2 -6765.911746473777,1 -7204.719434491789,1 -6793.701933445064,5 -4747.834434240344,0 -6904.099464949432,1 -6556.498650649859,4 -6487.452898028252,4 -7091.137623194211,6 -6558.203083947783,4 -7052.267436328557,6 -6451.585204603681,3 -6663.051703433664,6 -7200.181161911677,6 -6467.772751668658,1 -6942.784880897236,1 -4980.041093171701,7 -6889.707728421912,6 -7032.429135546456,6 -6642.888844898102,1 -6573.096381704346,2 -6364.6574401626885,5 -7111.534007576396,1 -6368.1754960731205,2 -6711.470438285173,1 -7106.245506009739,1 -6731.085262346296,6 -6659.591905604654,1 -6446.582669295999,6 -6546.195659912682,5 -6970.17923840936,1 -6557.912850896749,3 -6526.928095403732,2 -6430.677534939066,5 -6599.8329935525135,3 -4811.682056402591,7 -6593.602107468692,3 -6464.431877898666,5 -7160.100534378295,1 -6524.230119418091,4 -4776.751814909581,7 -6480.122426673419,4 -6840.609977252182,1 -6587.744353181637,4 -6454.811005150963,3 -6828.409290187323,1 
-6817.769825633437,6 -6913.531015191991,1 -6675.94938140968,6 -6718.061335340755,1 -6655.35597536684,3 -7035.631405915873,1 -6526.989352728323,5 -4710.69414732385,0 -6529.030923763831,5 -6934.088731098336,1 -6582.898814319858,2 -6575.160590752268,4 -6818.052868696928,6 -6690.434919614663,1 -6514.644421178833,3 -6427.053256419319,4 -6882.259834612045,1 -7239.7910149774,1 -6867.784162074432,6 -6678.50613881392,1 -6539.781586795751,6 -6576.242517202892,1 -7051.70380056868,1 -6465.337745386526,6 -6868.268358935833,1 -6455.535408774737,5 -6444.123665003491,3 -6544.861092721355,4 -6765.947426427194,1 -6966.006408960935,1 -6962.193114784721,1 -6622.514590624693,6 -6928.792413189897,2 -6431.478778548237,5 -6806.407966501839,6 -7643.492744348258,6 -4677.306232233379,7 -6791.046940705279,3 -6624.141455698888,2 -6428.530571765004,4 -6757.053869009808,1 -7035.788266830386,1 -6741.610473216706,3 -7045.320470653778,1 -6835.681560324988,1 -6753.142802151055,6 -6977.611196456271,6 -7059.836955197423,6 -6564.869634288098,5 -6681.662043091828,4 -7033.283563312336,6 -7013.053319412538,6 -7050.331758559631,6 -7245.475931021296,6 -7076.565597959222,6 -6853.255376002945,1 -6644.535513538003,6 -6734.9263998093475,2 -6643.2931863498,3 -6980.523046569875,1 -6464.550890823937,2 -6402.559795335019,4 -6987.894292552613,1 -7059.717493749915,1 -7278.277375213479,1 -6608.627680862051,4 -6802.36804876291,1 -6587.821800881063,6 -7125.984587899962,6 -4745.515823028452,7 -6858.328887472049,6 -6893.575138086557,4 -6665.308495469184,4 -6719.42988581498,5 -7043.72525837771,1 -7238.4531873183205,1 -7209.206616183026,1 -6608.327048924957,6 -7008.178989242018,6 -6564.509534498061,5 -6702.703714473036,6 -6754.818150164723,1 -7214.136425814089,1 -6355.983154752968,4 -6902.682794982834,6 -6628.443863081731,2 -6574.894316608949,3 -7035.541357597745,1 -6334.818633856557,4 -6807.387538335297,6 -6584.044006248558,4 -6919.844152715309,6 -6766.371071056383,1 -7129.312848324977,1 -6634.373763850145,2 
-6811.034759465902,1 -6755.087650624575,6 -6726.604380837529,1 -7026.057053336324,1 -6523.8427754525055,4 -6857.279157407817,1 -6442.815741652059,2 -6821.726188392531,1 -6973.86820068243,6 -7008.076682668492,6 -6588.598818763234,3 -6527.995203995283,6 -7248.367751962529,6 -6765.812846446923,1 -6784.960951544089,6 -6953.595867495968,6 -6944.675543394124,2 -6913.9850980575675,6 -6800.012189642919,1 -6711.608809817665,6 -7148.715113914629,6 -7099.702549175803,6 -6698.474024531293,1 -6396.5480773861855,2 -6969.298966019816,1 -6806.4208527139335,6 -6470.702392681569,1 -6551.873466175245,2 -6829.102089800798,1 -6653.580693272241,5 -6670.174526681627,5 -6665.159605087423,4 -6982.804712671146,1 -6762.1702255702485,6 -6646.6281468479565,1 -6531.10098705186,4 -6912.33452071038,6 -6827.723822586028,1 -6514.459256077422,3 -6804.694557511722,6 -6538.1972389417315,5 -7078.714392264243,1 -6468.1038869036975,3 -6510.856297270849,5 -7042.896894433917,6 -6740.755868771673,1 -7026.316489783123,1 -6534.162053666783,3 -6854.86054740816,3 -6544.526613796688,5 -6703.380721720078,6 -6502.433236809886,5 -6538.646807655236,2 -7202.076305387247,6 -6601.220206279881,3 -6942.596652143229,3 -6749.062170140904,6 -6610.0065784428625,3 -7088.1981238402095,1 -6882.19740006135,6 -6748.611892041983,1 -7151.277985183124,1 -6750.537162026599,6 -6270.372443612368,3 -4734.073117247083,0 -6950.083885868064,6 -6487.972532439468,2 -6726.094144703613,1 -7153.432688313429,1 -7009.062327935331,1 -6644.383427337385,3 -6962.504954900626,1 -6621.402388667736,4 -6782.397440415967,1 -6647.030404126796,2 -7224.929138472891,6 -6608.518417814554,1 -6368.706708669998,4 -7012.836103729059,1 -6485.144427367362,6 -6752.058976667136,1 -6606.183180645693,6 -7017.752429853453,6 -6696.094400098447,1 -7038.997622049156,1 -6577.728656066365,5 -6877.305882789777,1 -7277.567286952831,1 -6788.797395065522,6 -6669.788263900459,4 -6799.568733077409,6 -6643.477064526698,3 -7037.070967687524,1 -7087.510138071448,1 -7086.308804314245,1 
-6957.488935966199,1 -7007.6894316534745,1 -6525.626292907609,4 -6612.831668178769,1 -6933.655642269927,1 -6920.998619909258,6 -6636.679662602487,1 -7281.8373673025235,1 -7060.657527782616,1 -7013.4229370844405,1 -6678.072336008778,6 -7090.79108927447,6 -6765.330464690632,1 -7124.388784096479,1 -7309.896195431899,1 -6975.561290369494,1 -7096.686670133886,1 -7135.182190794343,1 -6696.105501865279,6 -6810.678504264907,1 -6889.510856263004,3 -6986.56561462748,1 -7216.009553506323,1 -7037.004015988211,1 -6630.461244044405,3 -6977.282345635189,1 -6835.583289146264,1 -6574.869556973135,6 -6455.298954695393,5 -6605.092298335918,6 -6473.653164276389,1 -6605.102314129782,2 -6854.679568634519,2 -6745.004961163578,1 -6984.301201074132,1 -6763.836577430097,6 -6690.17751752981,1 -7135.665748219999,1 -6979.705714431099,6 -6779.69127501203,1 -6895.832413858248,6 -6382.806696231731,3 -7093.77921258303,1 -7189.613027617213,1 -7038.140532930157,1 -7258.469434886443,1 -7146.205216451886,1 -6737.4106626989105,5 -6868.373698733518,6 -6616.102734416038,1 -6766.417740055118,6 -6682.922092472732,1 -6661.938477737485,6 -6746.69296662486,1 -6984.553859314039,1 -6925.917294390932,1 -7182.686652121508,1 -6556.359866228295,5 -6712.05309539846,1 -6891.349641876419,6 -6825.779149741264,1 -6905.440530981521,1 -6501.373282310099,3 -7270.404885630591,1 -7122.650222358338,1 -7065.077341870995,1 -6721.869044483297,6 -6312.156727512685,5 -7000.358466180085,1 -7001.68060046645,1 -6712.770333473434,6 -6673.378916251139,1 -6718.979772815572,3 -6353.474753789673,4 -6520.757437091273,2 -6280.84418141667,5 -7118.130312511458,1 -6804.089788182993,1 -6896.00206853211,6 -6656.554983868729,1 -6569.742442466311,5 -6354.706272908872,2 -6599.002853506478,2 -6655.8473281137685,3 -7106.845020761222,1 -7062.34085849223,1 -6493.204438708931,5 -7010.013643840379,1 -7238.164389292178,1 -6919.784676563293,1 -7181.703936595872,1 -7203.309412995769,1 -7094.201994113319,1 -6524.27002598332,2 -6744.093238771813,1 
-6990.89973060369,1 -7090.952055981292,1 -7085.841254597783,1 -7167.307259605002,1 -7010.279876678065,1 -7108.328572886337,1 -6940.691360105309,1 -6643.084062908825,6 -4753.294566062028,0 -6954.561450792108,1 -6686.603860086715,6 -7220.43175793978,6 -6727.403436031007,1 -7144.999933137424,1 -6865.544516450215,6 -6547.911368916412,1 -6838.503643614076,6 -6535.911956660459,3 diff --git a/openAI_RRM/data_20190419/thomsonagent_22_1.csv b/openAI_RRM/data_20190419/thomsonagent_22_1.csv deleted file mode 100644 index 67b6155..0000000 --- a/openAI_RRM/data_20190419/thomsonagent_22_1.csv +++ /dev/null @@ -1,357 +0,0 @@ -4691.821378295467,3 -4747.781868281091,0 -6810.2060767546245,2 -7039.156893176712,2 -4830.162966468673,3 -6736.1631786527,2 -6621.285074378641,1 -6961.169519539971,1 -7101.783200719381,1 -6547.770972246577,2 -4826.413908898835,3 -6995.178933799551,1 -6661.791828381669,2 -4818.054599131663,0 -6808.930562305568,1 -4772.487320072658,0 -6844.673423714632,2 -6526.7480620809965,1 -6926.971545463797,1 -7223.426363076649,1 -4782.120118249073,3 -6732.432366466177,1 -4945.280877215412,0 -6780.284699023344,2 -7054.588603967637,2 -6524.288752037295,1 -4827.098719244828,3 -6968.374231575206,1 -6948.2172472770235,1 -7130.853332289742,1 -7185.099541337189,1 -4795.221234581697,0 -6937.902576969467,1 -6747.162291354672,2 -7203.684011405461,2 -6591.502687276999,1 -7150.690280638164,1 -6629.254116124082,2 -6980.397785054955,2 -6854.470705549975,2 -4874.580806834957,3 -4549.662010995433,0 -6934.550553263904,1 -6878.637453606209,1 -6997.222642650669,1 -7051.5748606346915,1 -6692.416320091207,2 -6997.508141314875,2 -6729.368849391063,1 -6501.6933641289415,2 -7038.98911894661,2 -7160.71242640548,2 -6548.5090802738705,1 -7067.621101058454,1 -6907.693836839659,1 -4834.340496600629,0 -6821.898342377015,1 -4840.792183226366,3 -4512.869859309449,0 -7022.5136386021695,1 -7032.786118495609,1 -6914.1831350808525,1 -6501.751425537415,2 -7062.235441886667,2 -7323.957079287262,2 
-6607.9519179381105,1 -6398.185329153932,2 -7114.957643358861,2 -6627.13641568039,1 -6678.9999908459695,2 -7159.997717239907,2 -7016.395617397477,2 -4856.943465878148,3 -6727.844238526697,2 -6590.862737134234,1 -6560.948575586983,2 -6942.69215828935,2 -6612.178786839411,1 -7173.488170719587,1 -6592.444334135262,2 -6944.441798263471,2 -7224.780696672596,2 -6945.938735897036,2 -6550.4469482043005,1 -6657.691951180514,2 -7151.070536389016,2 -6540.913186670945,1 -6673.955569202702,2 -6604.220267472139,1 -6706.576797549426,2 -6321.703335570578,1 -6622.513259189511,2 -6517.32043941299,1 -7091.774990877828,1 -6561.496328662608,2 -6986.510416320301,2 -6498.260594242665,1 -7039.465072851414,1 -6972.862073848125,1 -6614.340379701002,2 -6481.685939418008,1 -6488.285975576652,2 -7052.235317721339,2 -7145.689039627486,2 -7066.854677876267,2 -4854.523116224001,0 -6560.62442949931,1 -6927.93555980382,1 -6557.960507764279,2 -6572.370459188744,1 -4763.159041763653,0 -6944.48319086769,1 -7255.710893681628,1 -4921.49892626791,0 -7006.53001602364,1 -6371.427138956726,2 -7154.024503480147,2 -6714.136902797468,1 -6637.6811241391715,2 -6593.462202978282,1 -6603.482305204965,2 -7036.667953357609,2 -4785.083875954887,3 -6861.061411826496,1 -6546.701789116396,2 -6789.095683365303,1 -7188.915514573735,1 -7175.342409938945,1 -4666.734401987723,3 -5021.315173393105,3 -6586.4952151873385,2 -7083.9392042188965,2 -6862.459884353468,2 -6716.824784641077,1 -6620.34051370578,2 -6528.92477648543,1 -6511.046832882697,2 -6760.240105898853,1 -6554.197358390977,2 -6660.982736353355,1 -6890.159403797978,1 -6425.44951092657,2 -4903.001999330992,3 -6844.683458460024,2 -6936.6678481833715,2 -6998.42416012928,2 -7077.3879441923755,2 -7034.938554189472,2 -7097.288341481848,2 -6638.207099858215,1 -7206.595874027358,1 -6569.580073162164,2 -6593.935699010321,1 -7305.82092972559,1 -6609.776530784373,2 -7172.817743197306,2 -7293.308871483507,2 -7190.329098661321,2 -6486.494935291109,1 -6674.57087991865,2 
-7196.381986518582,2 -7075.09789970231,2 -7029.909756809148,2 -7235.312859217771,2 -7205.213428596774,2 -6928.025037643271,2 -6660.165219070677,1 -6704.164787541989,2 -7129.090494226133,2 -7121.598534156548,2 -6608.335644625997,1 -6560.789003518012,2 -6914.928157856668,2 -6561.320295884463,1 -7122.295478528242,1 -6435.995915857704,2 -7063.364602001561,2 -7125.697143287129,2 -7030.455829908954,2 -7036.7300966861885,2 -6931.157715272526,2 -6770.914368195778,1 -4879.579490044641,0 -6713.511344825237,1 -6883.01289405049,1 -6493.640150587758,2 -7205.999618053382,2 -7150.202277333244,2 -7016.497455758246,2 -6537.783053935472,1 -6581.542212872458,2 -6632.877681526956,1 -6600.952860039681,2 -7019.692235436036,2 -4815.886498133065,3 -6863.858806267042,2 -7082.082164465789,2 -6986.349443285333,2 -7129.4332491310715,2 -6925.937816826795,2 -6978.687347729595,2 -6792.221795494711,2 -7165.078744093788,2 -7038.252790586417,2 -6607.54953141654,1 -7116.021701876727,1 -6640.8227091905555,2 -6512.640630677112,1 -6653.316334501748,2 -6927.471661836795,2 -7036.681811649009,2 -6591.733289440957,1 -7020.875341238627,1 -6856.627085162561,1 -6703.722322250835,2 -7063.718627707796,2 -7007.008419527444,2 -7153.7122844750975,2 -6860.900011379291,2 -7033.895003969097,2 -6557.221679683792,1 -6574.748898327179,2 -6572.408628261485,1 -6984.372613901677,1 -6558.9188755447385,2 -6943.768009209908,2 -6560.190574653687,1 -6514.417182670696,2 -6955.563401923424,2 -6818.6528607667315,2 -7065.434240387544,2 -4773.6889776222,3 -6875.60738523623,1 -6635.833574394149,2 -6776.7699312155355,1 -7206.446723287897,1 -6588.572470652449,2 -7055.703595185254,2 -7064.332937353028,2 -6558.393362231684,1 -7023.411201195939,1 -6681.006883499904,2 -6957.085651008152,2 -7010.8995601015995,2 -7137.2185588555385,2 -6981.576214315369,2 -7174.75112198281,2 -7010.073393016178,2 -6557.497914976642,1 -7120.249858161738,1 -7025.746763402194,1 -6427.927286669994,2 -7167.130521794266,2 -7078.491907453559,2 -6654.650929985208,1 
-6740.942139544376,2 -7106.867644058926,2 -6967.119861490053,2 -7096.609802666844,2 -7089.391190278775,2 -6576.771598851629,1 -6905.097814695343,1 -6597.984333278693,2 -6969.493803030655,2 -7064.1554462878175,2 -4854.494118390365,3 -6893.2204666676025,2 -7112.382943674751,2 -7122.95822828163,2 -7093.190398455739,2 -6890.383579778461,2 -7041.510612836343,2 -7098.174643653686,2 -7018.589908395769,2 -7292.864919010752,2 -7027.194784472649,2 -6976.016140966211,2 -6551.76211582348,1 -6615.035337998663,2 -6532.826245154738,1 -7217.916274213911,1 -6477.8260020750795,2 -7094.918111251257,2 -7075.583174225198,2 -4820.735713299038,0 -6941.901220321613,1 -7111.6681272595415,1 -6601.2606840605295,2 -6503.002300922998,1 -6588.298716597173,2 -7074.982596462326,2 -7074.729830791934,2 -6936.167807644136,2 -6841.853773067742,2 -6882.5928794707725,2 -7005.326032307725,2 -6636.47053046202,1 -6601.380455818208,2 -6960.6525917365125,2 -4808.967534758443,3 -6848.405625038253,2 -7034.016884194765,2 -7160.606536783185,2 -6614.785725339969,1 -6842.93824051167,1 -7063.967601032025,1 -6918.119692751169,1 -6798.520173646348,2 -7149.599855318848,2 -7035.434663748853,2 -7014.866795232548,2 -7016.091830894513,2 -7112.310457804022,2 -7208.711391254497,2 -7147.662707978803,2 -7109.300818489204,2 -7156.013790169071,2 -6464.298862795397,1 -6949.92394634094,1 -6582.769260201776,2 -7037.388170087898,2 -6970.943838333824,2 -6904.320530282486,2 -6791.045554624641,2 -7098.993115860696,2 -6959.887812219937,2 -6935.358909391219,2 -7165.778448692931,2 -6972.881387005233,2 -6879.141976966021,2 -7190.836891885245,2 -7029.698815322232,2 -7278.303982379205,2 -6878.8290471481105,2 -6689.37736287149,1 -6666.421194006113,2 -6770.273590733532,2 -7057.731230184832,2 -7252.132052853777,2 -6984.334459252127,2 -7039.26360829572,2 -7123.446773473285,2 -7013.95815944945,2 -6651.929259800371,1 -6813.0960353108485,2 -7007.983572361958,2 -7116.839044085536,2 -6873.254245475726,2 -7100.7735615513675,2 -7015.5154417313015,2 
-7195.835062829428,2 -6941.747979328004,2 -7299.395092183973,2 -6998.865054803824,2 -7025.500533767707,2 -7261.513050565645,2 -4958.5172916365345,3 diff --git a/openAI_RRM/data_20190419/thomsonagent_3222.csv b/openAI_RRM/data_20190419/thomsonagent_3222.csv deleted file mode 100644 index 67bf16a..0000000 --- a/openAI_RRM/data_20190419/thomsonagent_3222.csv +++ /dev/null @@ -1,376 +0,0 @@ -6749.705702782545,0 -8191.53045713126,6 -8006.39384871145,3 -8243.270777417089,6 -8417.433311754394,4 -10125.486164909367,2 -6977.537563040518,0 -7061.910809287144,0 -8473.39269458296,4 -10186.26855501716,2 -9923.922440576964,5 -10147.018086858428,2 -6916.103071375785,0 -8213.634187912854,3 -6943.583442967419,7 -6975.167061449313,7 -8360.012717024747,4 -6725.669314201694,7 -8155.6161497775,4 -10406.804829288541,5 -10850.121474682928,5 -6720.568134691852,0 -8422.725461874463,6 -8075.626087790713,1 -8421.446358507485,1 -8579.895806128845,1 -8790.715788349458,1 -10186.92415543578,5 -8054.1202515729965,6 -8035.491142443709,1 -10532.435593279084,5 -8387.31057986762,4 -8475.50200747002,6 -8527.248250176808,4 -10812.632738834405,5 -9689.50375673644,2 -8449.509609343711,3 -8473.687495214703,3 -7125.152845414024,7 -9941.851522604313,2 -9884.110851010752,5 -8170.804704468574,4 -8564.51721198768,4 -10445.158759256843,2 -7005.186784723082,7 -8231.09548517976,4 -8069.438448339008,1 -8320.911543375772,3 -7956.330750447252,4 -8265.121751526112,1 -8002.9622516350955,6 -8271.146614122747,3 -6956.525448959227,7 -7034.503573184953,7 -8251.881628404439,4 -8472.158121226625,6 -10498.747133126764,2 -10060.733795487324,5 -10930.72229240521,5 -6734.25712864296,0 -10443.13236133557,5 -9982.196774955952,2 -10658.05075221081,2 -9882.703878203998,5 -6750.232707306854,0 -10052.753943820357,2 -10682.147263258525,2 -9843.638240984968,5 -10999.129405187598,5 -8315.7652270125,3 -10421.276363444995,2 -8282.420216006925,3 -8091.040166121201,6 -10448.834377862486,2 -6785.280170288703,7 -8202.701440846955,1 
-8411.925454711463,3 -6850.307857129235,7 -8384.64936644053,6 -8169.665160400943,3 -8660.98499099975,3 -10183.417763870884,5 -10709.806736186472,5 -10553.165309272455,5 -6689.665744220265,0 -10073.899855289566,5 -9900.052506898373,2 -10148.150980277882,5 -10765.36132639973,5 -10491.865817342314,5 -10095.864389668963,2 -9882.847093054042,5 -10079.125564103011,2 -8212.500414746279,1 -10167.025335597165,2 -10037.316842374204,5 -10788.748475658307,5 -8261.371086580933,6 -10214.014764612433,5 -10256.316682740162,2 -8462.89004335393,6 -6773.696683465912,7 -10411.667056950915,5 -9882.422424592654,2 -10714.039030283533,2 -8502.881288015444,6 -7935.699062134904,1 -8241.33398954639,4 -8575.18628956531,4 -8718.043977237941,4 -9977.483849995395,2 -10071.188295499782,5 -8316.066940801362,1 -10228.525750941979,5 -10402.445818752143,5 -9861.550340127387,2 -8000.06386824836,4 -10403.038177070419,2 -9821.066496720427,5 -10801.668115150635,5 -8146.647012635448,6 -10621.58493773823,2 -10106.845454243576,5 -10570.911125711922,5 -8324.08480213858,4 -8574.818686929328,4 -9884.08413425567,2 -10700.596661953588,2 -8244.581635117622,6 -10433.866775673545,2 -10432.69081403543,2 -10007.37528327169,5 -8287.927638707946,3 -10418.51748653757,5 -8491.336879390203,1 -10405.560064797417,5 -6877.185453385623,0 -8131.531060365645,6 -10305.916175334856,2 -10757.542026245786,2 -10732.277699913004,2 -10876.366920748056,2 -9872.313068161206,5 -10765.944586451196,5 -8341.8089866868,1 -10003.453020131221,2 -10036.197672940794,5 -10773.934496741964,5 -8300.467772314256,4 -10087.187121580428,2 -10794.918756444464,2 -9874.897239099146,5 -10156.892694680919,2 -8213.388105648775,1 -10341.837187197236,5 -8360.871862582393,3 -10436.45802890982,5 -8171.659572046002,3 -8421.747913228599,1 -10263.516331356604,5 -6898.034712156686,7 -10291.492755156547,2 -9978.02172052964,5 -9979.081602122726,2 -10121.154613908295,5 -10612.937430459402,5 -10676.749947049564,5 -10898.624886934726,5 -10804.345096338742,5 
-6974.1580685553145,7 -10334.482602343114,5 -10088.098102592761,2 -10581.956034335864,2 -10815.268319807898,2 -10440.292401569108,2 -8497.231841088267,6 -10228.605549002987,5 -9743.532439312092,2 -10962.000956113334,2 -10043.814937230303,5 -10630.814921082252,5 -6885.111216820775,7 -8085.907984777696,1 -10052.727089271035,2 -10043.31907466746,5 -10037.462579299598,2 -10720.221823825357,2 -10073.260856171326,5 -6744.876483091984,0 -10216.86824232489,5 -10676.988226388738,5 -9733.020749547883,2 -8307.335193491255,3 -10311.367199879365,5 -9900.338007061357,2 -10817.825843309702,2 -8311.569847005067,6 -10271.877793682917,2 -8109.4920354820515,1 -10110.38090250417,2 -9849.078782001736,5 -8490.13808531046,1 -10177.621173240477,2 -8383.41312425552,3 -8317.484909703748,3 -10440.593318239258,2 -10655.614748223634,2 -10176.35263900949,5 -8326.677277740975,6 -10042.246560288695,5 -10675.931893914945,5 -9871.333447665602,2 -9739.094775424146,5 -9820.224731553462,2 -10476.739915673345,2 -10524.382730663481,2 -10590.624660022402,2 -9870.03007759726,5 -10241.678611154874,2 -6852.052363515989,0 -10408.47489250419,2 -10534.563528640836,2 -10117.410260816512,5 -10836.533990193986,5 -8261.21391907135,3 -10535.334425601552,2 -10643.829159534565,2 -10024.720712417942,5 -8411.992974277966,1 -10274.805757377713,5 -10066.965464954113,2 -10774.776845564014,2 -10056.433028286376,5 -9994.108544353183,2 -8257.238090131588,1 -10420.213264179656,2 -10931.226584777745,2 -6710.692724221412,7 -8060.3724471447995,4 -10543.565986135978,5 -8174.398258893029,3 -10373.268025558373,5 -9897.262504432783,2 -10492.669982805764,2 -10249.31380632124,5 -8561.025204556541,4 -10317.199379940312,5 -10077.152205303973,2 -9990.547445918222,5 -10161.622011008358,2 -10640.188576093939,2 -6808.006371130064,7 -10167.611798333046,2 -9997.445643984922,5 -10067.082874002388,2 -9985.921910653597,5 -9917.452250316106,2 -9929.307615594533,5 -6882.670426919021,7 -10226.402990855597,2 -8416.962764266525,1 
-10288.477793025886,2 -10632.656527959163,2 -8421.848211349155,6 -10299.816853725728,2 -10747.674493905433,2 -10831.67378272185,2 -9784.309103329826,5 -9884.743984126591,2 -10572.206803301407,2 -10007.816862799704,5 -10792.043258649923,5 -10622.588378257102,5 -8212.973560800894,3 -10624.581826653564,2 -9994.456189480545,5 -10061.687371438567,2 -10677.315025762142,2 -8234.561901877754,6 -10324.856060396329,5 -9952.491932853058,2 -10115.060194753967,5 -9934.710977912879,2 -10585.406354169103,2 -10721.73322046106,2 -7034.713737231365,0 -10485.22561843986,5 -10191.96710826229,2 -10032.551097773714,5 -9955.163143768672,2 -10676.954330279434,2 -10675.584892289471,2 -10039.087188344825,5 -10617.388113955829,5 -8101.860343602208,4 -10216.662559711971,2 -8410.343579503766,1 -10139.486513672802,2 -9732.20256258494,5 -10847.189836255726,5 -10716.412254361407,5 -10765.643667142316,5 -10809.919691125706,5 -10618.013795012645,5 -10351.9081328994,5 -10569.886710176965,5 -10230.915190705004,2 -10618.329193033018,2 -9997.736733484067,5 -10889.018467828526,5 -10483.522726905367,5 -10741.35176063109,5 -10725.751820216812,5 -10587.512562531314,5 -10609.074911214364,5 -10633.355555366164,5 -10940.288040214602,5 -10114.347717194669,2 -9936.005900225598,5 -10743.195740329284,5 -10205.415105269809,2 -10896.248555815648,2 -10020.433022182107,5 -10077.114077932469,2 -8312.769394555939,3 -10613.19486690755,2 -10498.119777573058,2 -9817.019508058725,5 -10695.887464930116,5 -10099.514621335422,2 -10508.408392967644,2 -9791.61691792251,5 -10661.56850418523,5 -9831.289939659788,2 -10101.483830179672,5 -10113.229075131458,2 -10205.624439497225,5 -10106.656905115113,2 -10053.085226470432,5 -10794.491721810704,5 -10750.337832540914,5 -10284.781500801862,2 -6976.629907750449,0 -10412.065345820252,2 -10733.388908705077,2 -10028.785663508062,5 -10076.197712976787,2 -10494.706241489588,2 -10003.768303432984,5 -10711.733960320216,5 -9883.076557993676,2 -10773.58485296789,2 -10594.070875233161,2 
-10610.918422429952,2 -9868.267383635033,5 -10420.463710512946,2 -10261.072965869002,5 -9902.523731152582,2 -9892.525849670867,5 -10158.632848889032,2 -10805.328814455557,2 -10157.35195878943,5 -10393.808104443066,5 -6901.914875067307,0 -10663.256353066776,2 -10468.606427677329,2 -10491.591222765916,2 -10782.093687452849,2 -10614.242471784924,2 -10855.247727235908,2 -10313.197220898332,2 -9774.67610551284,5 -10290.19502768858,2 -10644.418356164255,2 -10185.932984031024,5 -10285.08333562797,2 From b8f9b6166c73910f37622b36174f0adcc74f18ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Fri, 7 Jun 2019 13:56:47 +0200 Subject: [PATCH 38/54] change observation model and agents --- .../Setting2_2/config_slave.yaml | 2 +- .../Setting2_2/config_slave2.yaml | 2 +- .../Setting2_2unsym/config_slave.yaml | 2 +- .../Setting2_2unsym/config_slave2.yaml | 4 +- .../Setting3_112/config_slave.yaml | 2 +- .../Setting3_112/config_slave2.yaml | 2 +- .../Setting3_112/config_slave3.yaml | 2 +- .../Setting3_222/config_slave.yaml | 2 +- .../Setting3_222/config_slave2.yaml | 2 +- .../Setting3_222/config_slave3.yaml | 2 +- .../Setting8_22213122/common.py | 28 +++ .../Setting8_22213122/config_slave.yaml | 35 ++++ .../Setting8_22213122/config_slave2.yaml | 35 ++++ .../Setting8_22213122/config_slave3.yaml | 35 ++++ .../Setting8_22213122/config_slave4.yaml | 35 ++++ .../Setting8_22213122/config_slave5.yaml | 35 ++++ .../Setting8_22213122/config_slave6.yaml | 35 ++++ .../Setting8_22213122/config_slave7.yaml | 35 ++++ .../Setting8_22213122/config_slave8.yaml | 35 ++++ .../Setting8_22213122/my_filter.py | 53 +++++ .../Setting8_22213122/readme.txt | 15 ++ openAI_RRM/channel_controller.py | 99 +++++++-- openAI_RRM/config_master_simulation.yaml | 1 + openAI_RRM/rrm_agent.py | 196 +++++++++++------- openAI_RRM/thompson_agent.py | 14 +- 25 files changed, 602 insertions(+), 106 deletions(-) create mode 100755 openAI_RRM/SimulationSlavesConfig/Setting8_22213122/common.py create mode 
100644 openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave.yaml create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave2.yaml create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave3.yaml create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave4.yaml create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave5.yaml create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave6.yaml create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave7.yaml create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave8.yaml create mode 100755 openAI_RRM/SimulationSlavesConfig/Setting8_22213122/my_filter.py create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting8_22213122/readme.txt diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml index 0305fef..88ebedd 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml @@ -31,5 +31,5 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02"], 'neighbors' : ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], 'myMAC' : 'aa:aa:aa:aa:aa:01', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughput':54000000, 'txBytesRandom':0}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml index fff7275..4f59241 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml @@ -31,5 +31,5 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12"], 'neighbors' : 
['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], 'myMAC' : 'aa:aa:aa:aa:aa:02', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughput':54000000, 'txBytesRandom':0}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml index 0f3b703..37610b2 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml @@ -32,6 +32,6 @@ modules: 'neighbors' : ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], 'myMAC' : 'aa:aa:aa:aa:aa:01', 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault': 54000000, - 'channelThroughput':[54000000, 54000000, 54000000, 54000000, 26000000], + 'channelThroughput':[54000000, 54000000, 54000000, 54000000, 26000000], 'txBytesRandom':0}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml index bdd9d3d..84870f7 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml @@ -31,7 +31,7 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:11"], 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], 'myMAC' : 'aa:aa:aa:aa:aa:02', - ''simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault': 54000000, - 'channelThroughput':[54000000, 54000000, 54000000, 54000000, 26000000], + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault': 54000000, + 'channelThroughput':[54000000, 54000000, 54000000, 54000000, 26000000], 'txBytesRandom':0}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml index 8e078b1..7bd46d6 100644 --- 
a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml @@ -31,5 +31,5 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:01"], 'neighbors' : ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], 'myMAC' : 'aa:aa:aa:aa:aa:01', - 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml index d0c06a2..12be0a4 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml @@ -31,5 +31,5 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:11"], 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], 'myMAC' : 'aa:aa:aa:aa:aa:02', - 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml index db179bd..427cb58 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml @@ -31,5 +31,5 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22"], 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:02'], 'myMAC' : 'aa:aa:aa:aa:aa:03', - 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml 
b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml index ef3246f..1c7c647 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml @@ -31,5 +31,5 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02"], 'neighbors' : ['aa:aa:aa:aa:aa:02'], 'myMAC' : 'aa:aa:aa:aa:aa:01', - 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml index d0d4fa7..4f59241 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml @@ -31,5 +31,5 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12"], 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], 'myMAC' : 'aa:aa:aa:aa:aa:02', - 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml index 88e3872..ea0624c 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml @@ -31,5 +31,5 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22"], 'neighbors' : ['aa:aa:aa:aa:aa:02'], 'myMAC' : 'aa:aa:aa:aa:aa:03', - 'simulation': {'channelSwitchingTime': 200, 'channelThroughput':54000000, 'txBytesRandom':0.2}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} diff --git 
a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/common.py b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/common.py new file mode 100755 index 0000000..370d25f --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/common.py @@ -0,0 +1,28 @@ +from uniflex.core import events + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class AveragedSpectrumScanSampleEvent(events.EventBase): + def __init__(self, avg): + super().__init__() + self.avg = avg + + +class StartMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class StopMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class ChangeWindowSizeEvent(events.EventBase): + def __init__(self, value): + super().__init__() + self.window = value diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave.yaml new file mode 100644 index 0000000..f20b051 --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave.yaml @@ -0,0 +1,35 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:01:01", "cc:cc:cc:cc:01:02"], + 'neighbors' : ['aa:aa:aa:aa:aa:02','aa:aa:aa:aa:aa:04'], + 'myMAC' : 'aa:aa:aa:aa:aa:01', + 
'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave2.yaml new file mode 100644 index 0000000..bf9c7bc --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave2.yaml @@ -0,0 +1,35 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:02:01", "cc:cc:cc:cc:02:02"], + 'neighbors' : ['aa:aa:aa:aa:aa:01','aa:aa:aa:aa:aa:03','aa:aa:aa:aa:aa:05'], + 'myMAC' : 'aa:aa:aa:aa:aa:02', + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave3.yaml new file mode 100644 index 0000000..6fa666c --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave3.yaml @@ -0,0 +1,35 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + 
discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:03:01", "cc:cc:cc:cc:03:02"], + 'neighbors' : ['aa:aa:aa:aa:aa:02','aa:aa:aa:aa:aa:06'], + 'myMAC' : 'aa:aa:aa:aa:aa:03', + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave4.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave4.yaml new file mode 100644 index 0000000..07c1c6d --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave4.yaml @@ -0,0 +1,35 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:04:01"], + 'neighbors' : ['aa:aa:aa:aa:aa:01','aa:aa:aa:aa:aa:05','aa:aa:aa:aa:aa:07'], + 'myMAC' : 'aa:aa:aa:aa:aa:04', + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave5.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave5.yaml new file mode 100644 index 0000000..b936e5b --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave5.yaml @@ 
-0,0 +1,35 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:05:01", "cc:cc:cc:cc:05:02", "cc:cc:cc:cc:05:03"], + 'neighbors' : ['aa:aa:aa:aa:aa:02','aa:aa:aa:aa:aa:04','aa:aa:aa:aa:aa:06','aa:aa:aa:aa:aa:08'], + 'myMAC' : 'aa:aa:aa:aa:aa:05', + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave6.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave6.yaml new file mode 100644 index 0000000..33091ee --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave6.yaml @@ -0,0 +1,35 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:06:01"], + 'neighbors' : 
['aa:aa:aa:aa:aa:03','aa:aa:aa:aa:aa:05','aa:aa:aa:aa:aa:08'], + 'myMAC' : 'aa:aa:aa:aa:aa:06', + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave7.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave7.yaml new file mode 100644 index 0000000..be6e611 --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave7.yaml @@ -0,0 +1,35 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:07:01", "cc:cc:cc:cc:07:02"], + 'neighbors' : ['aa:aa:aa:aa:aa:04', 'aa:aa:aa:aa:aa:08'], + 'myMAC' : 'aa:aa:aa:aa:aa:07', + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'channelThroughput':[54000000, 54000000, 54000000, 54000000, 26000000]}} + diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave8.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave8.yaml new file mode 100644 index 0000000..105c16e --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave8.yaml @@ -0,0 +1,35 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + 
xpub: "tcp://127.0.0.1:8989" + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:08:01", "cc:cc:cc:cc:08:02"], + 'neighbors' : ['aa:aa:aa:aa:aa:05','aa:aa:aa:aa:aa:06','aa:aa:aa:aa:aa:07'], + 'myMAC' : 'aa:aa:aa:aa:aa:08', + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/my_filter.py b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/my_filter.py new file mode 100755 index 0000000..59d59a2 --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/my_filter.py @@ -0,0 +1,53 @@ +import logging +from uniflex.core import modules +from sbi.radio_device.events import SpectralScanSampleEvent +from common import AveragedSpectrumScanSampleEvent +from common import ChangeWindowSizeEvent + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class MyAvgFilter(modules.ControlApplication): + def __init__(self, window): + super(MyAvgFilter, self).__init__() + self.log = logging.getLogger('MyFilter') + self.window = window + self.samples = [] + + @modules.on_event(ChangeWindowSizeEvent) + def change_window_size(self, event): + self.log.info("New window size: {}".format(event.window)) + self.window = event.window + + def change_window_size_func(self, newWindow): + self.log.info("New window size: {}".format(newWindow)) + self.window = newWindow + + def get_window_size(self): + return self.window + + @modules.on_event(SpectralScanSampleEvent) + def serve_spectral_scan_sample(self, event): 
+ sample = event.sample + node = event.node + device = event.device + self.log.debug("New SpectralScan Sample:{} from node {}, device: {}" + .format(sample, node, device)) + + self.samples.append(sample) + + if len(self.samples) == self.window: + s = sum(self.samples) + self.samples.pop(0) + avg = s / self.window + self.log.debug("Calculated average: {}".format(avg)) + event = AveragedSpectrumScanSampleEvent(avg) + self.send_event(event) + + def add_two(self, value): + value1 = value + 2 + value2 = value * 2 + return [value1, value2] diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/readme.txt b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/readme.txt new file mode 100644 index 0000000..5a22888 --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/readme.txt @@ -0,0 +1,15 @@ +# 1. Run control program and all modules on local node +uniflex-agent --config ./config_local.yaml + +source ~/Uniflex/dev/bin/activate + +# 2a. Run control program in master node: +uniflex-broker +# 2b. Run control program in master node: +python3 rrm_agent.py --config ./config_master.yaml +# 2c. 
Run modules in slave node: +uniflex-agent --config ./config_slave.yaml +uniflex-agent --config ./config_slave2.yaml +uniflex-agent --config ./config_slave3.yaml + +# For debugging mode run with -v option diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index 16866e1..c2590e5 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -45,12 +45,16 @@ def __init__(self,**kwargs): self.lastObservation = [] self.actionSet = [] self.simulation = False + self.simulationsteps = None if 'availableChannels' in kwargs: self.availableChannels = kwargs['availableChannels'] if 'simulation' in kwargs: self.simulation = kwargs['simulation'] + + if 'steptime' in kwargs: + self.simulationsteptime = kwargs['steptime'] @modules.on_start() def my_start_function(self): @@ -178,6 +182,30 @@ def set_channel(self, node_uuid, dev_uuid, ifaceName, channel_number, channel_wi else: device.blocking(False).set_channel(channel_number, ifaceName, control_socket_path='/var/run/hostapd') return True + + def get_num_clients(self): + ''' + Returns a list of number of clients of each ap + ''' + client_nums = [] + for node in self.get_nodes(): + for device in node.get_devices(): + for interface in device.get_interfaces(): + infos = device.get_info_of_connected_devices(interface) + client_nums.append(len(infos)) + return client_nums + + def get_num_neighbours(self): + ''' + Returns a list of numbers of neighbours of each ap + ''' + neighbours = [] + for node in self.get_nodes(): + for device in node.get_devices(): + for interface in device.get_interfaces(): + infos = device.get_current_neighbours(interface) + neighbours.append(len(infos)) + return neighbours def get_bandwidth(self): ''' @@ -220,8 +248,11 @@ def get_bandwidth(self): if len(flow) > 0: flow = flow[0] dif = datetime.datetime.now() - flow['last update'] + tmpBandwidth = (newTxBytes - flow['tx bytes'] ) / (dif.total_seconds() + dif.microseconds / 1000000.0) + if(self.simulation and 
self.simulationsteptime): + tmpBandwidth = (newTxBytes - flow['tx bytes'] ) / (self.simulationsteptime) bandwidth[mac] = { - 'bandwidth':(newTxBytes - flow['tx bytes'] ) / (dif.total_seconds() + dif.microseconds / 1000000.0), + 'bandwidth':(tmpBandwidth), 'node': {'hostname': node.hostname, 'uuid': node.uuid}, 'device': {'name': device.name, 'uuid': device.uuid}, 'interface': interface} @@ -241,7 +272,46 @@ def get_bandwidth(self): if flow['old']: device.my_control_flow.remove(flow) return bandwidth - + + def _get_raw_clientlist(self): + ''' + Returns a list of the bandwidth of all transmitted data from one + controlled device to a client. The data is structured as follows: + { + 'MAC_of_client1' : { + 'mac' : 'MAC_of_client1', + 'node': { + 'hostname': 'hostname of my AP node', + 'uuid': 'uuid of my AP node' + }, + 'device': { + 'name': 'device name of the AP's physical interface', + 'uuid': 'uuid of the device', + }, + 'interface': 'name of the interface' + } + } + Notice: new devices have bandwidth 0! 
+ ''' + clientlist = {} + for node in self.get_nodes(): + for device in node.get_devices(): + if type(device.my_control_flow) is not list: + device.my_control_flow = [] + + for flow in device.my_control_flow: + flow['old'] = True + + for interface in device.get_interfaces(): + infos = device.get_info_of_connected_devices(interface) + + for mac in infos: + clientlist[mac] = { + 'node': {'hostname': node.hostname, 'uuid': node.uuid}, + 'device': {'name': device.name, 'uuid': device.uuid}, + 'interface': interface} + return clientlist + def get_interfaces(self): ''' Returns a data structure of all available interfaces in the system @@ -331,7 +401,7 @@ def simulate_flows(self): for node in self.get_nodes(): for device in node.get_devices(): for interface in device.get_interfaces(): - device.set_packet_counter(flows, interface) + device.set_packet_counter(flows, interface, self.simulationsteptime) @modules.on_event(PeriodicEvaluationTimeEvent) def periodic_evaluation(self, event): @@ -396,12 +466,14 @@ def render(): def get_observationSpace(self): ''' Returns observation space for open AI gym - result is a MultiDiscrete vector space - each component has the number of available channels. 
Is the same value for all entries + Observation space is a matrix of number of APs * 2 + First column represents the number of clients per ap, + the second column the numer of neighbouring aps + the maximum is 10 ''' - maxValues = [len(self.availableChannels) for i in self._create_interface_list()] - #return spaces.Box(low=0, high=numChannels, shape=(len(self._create_interface_list()),0), dtype=numpy.float32) - return spaces.MultiDiscrete(maxValues) + #maxValues = [len(self.availableChannels) for i in self._create_interface_list()] + return spaces.Box(low=0, high=10, shape=(len(self._create_interface_list()),2), dtype=numpy.uint32) + #return spaces.MultiDiscrete(maxValues) #spaces.Box(low=0, high=10000000, shape=(len(self.observationSpace),), dtype=numpy.float32) def get_actionSpace(self): @@ -423,9 +495,12 @@ def get_observation(self): ''' Returns vector with state (channel) of each AP ''' - channels = self.get_channels() - observation = list(map(lambda x: x['channel number'], channels)) - return observation + client_nums = self.get_num_clients() + neighbours_nums = self.get_num_neighbours() + result = [] + for i in range(0, len(neighbours_nums)): + result.append([client_nums[i], neighbours_nums[i]]) + return result # game over if there is a new interface def get_gameOver(self): @@ -478,7 +553,7 @@ def _create_client_list(self): result is list of dictionarys with attribute: mac, node, device, iface ''' clientList = [] - clients = self.get_bandwidth() + clients = self._get_raw_clientlist() for mac, client in clients.items(): clientList.append({'mac': mac, 'node': client['node']['uuid'], 'device': client['device']['uuid'], 'iface': client['interface']}) diff --git a/openAI_RRM/config_master_simulation.yaml b/openAI_RRM/config_master_simulation.yaml index fa2339f..5b017ff 100644 --- a/openAI_RRM/config_master_simulation.yaml +++ b/openAI_RRM/config_master_simulation.yaml @@ -18,6 +18,7 @@ control_applications: openAIGymController: True kwargs : { 'availableChannels' 
: [1,5], + 'steptime' : 1, 'simulation': True } diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index 5946393..ae89402 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -20,7 +20,9 @@ parser.add_argument('--output', help='path to a csv file for agent output data', default=None) parser.add_argument('--plot', help='activate plotting', default=None) parser.add_argument('--steptime', help='interval between two steps', default=1) -parser.add_argument('--steps', help='number of steps in this execution. If not set, the agents runs infinitly long', default=None) +parser.add_argument('--steps', help='number of steps per episode. If not set, the agents runs infinitly long', default=None) +parser.add_argument('--episodes', help='number of episodes in this execution. If not set, the agents runs infinitly long', default=1) +parser.add_argument('--trainingfile', help='file to load and store training data', default=None) args = parser.parse_args() if not args.config: @@ -28,7 +30,9 @@ os._exit(1) if not args.output: print("No output file specified! - Skip data") - +if not args.trainingfile: + print("No training file specified! 
- Start with unlearned agent") + if args.plot: import matplotlib.pyplot as plt @@ -46,23 +50,25 @@ rew_history = [] numChannels = 2 -episode = 0 while True: - run = 0 - runs = [] - rewards = [] - actions = [] state = env.reset() n = 0 ac_space = env.action_space ob_space = env.observation_space + + print("reset agent") print("Observation space: ", ob_space, ob_space.dtype) print("Action space: ", ac_space, ac_space.n) - s_size = ob_space.shape[0] + tmps_size = ob_space.shape + s_size = tmps_size[0] * tmps_size[1] + #s_size = list(map(lambda x: x * ob_space.high, s_size)) a_size = ac_space.n + + print(s_size) + model = keras.Sequential() model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) model.add(keras.layers.Dense(5, activation='relu')) @@ -70,6 +76,15 @@ model.compile(optimizer=tf.train.AdamOptimizer(0.001), loss='categorical_crossentropy', metrics=['accuracy']) + + if args.trainingfile and not os.path.isfile(args.trainingfile): + try: + model.load_weights(args.trainingfile) + print("Load model") + except ValueError: + print("Spaces does not match") + + print(state) try: state = np.reshape(state, [1, s_size]) except ValueError: @@ -78,88 +93,109 @@ done = False if a_size == 0: - print("there is no vaild AP - sleep 10 seconds") + print("there is no vaild AP - sleep 2 seconds") time.sleep(2) continue - aps = int(log(a_size, numChannels)) - - for i in range(0, aps): - actions.append([]) + episode = 1 - while not done: - # Choose action - #epsilon = 1 - if np.random.rand(1) < epsilon: - action = np.random.randint(a_size) - else: - action = np.argmax(model.predict(state)[0]) - - # Step - next_state, reward, done, _ = env.step(action) + while True: + print("start episode") + epsilon = 1.0 + + run = 0 + runs = [] + rewards = [] + actions = [] + + aps = int(log(a_size, numChannels)) - reward /= 1000 + for i in range(0, aps): + actions.append([]) + + while not done: + # Choose action + if np.random.rand(1) < epsilon: + action = 
np.random.randint(a_size) + else: + action = np.argmax(model.predict(state)[0]) - if done: - # print("episode: {}/{}, time: {}, rew: {}, eps: {:.2}" - # .format(e, total_episodes, time, rewardsum, epsilon)) - break + # Step + next_state, reward, done, _ = env.step(action) + + reward /= 1000 - next_state = np.reshape(next_state, [1, s_size]) + if done: + # print("episode: {}/{}, time: {}, rew: {}, eps: {:.2}" + # .format(e, total_episodes, time, rewardsum, epsilon)) + break - # Train - target = reward - if not done: - target = (reward)# + 0.95 * np.amax(model.predict(next_state)[0])) - - print(target) + + next_state = np.reshape(next_state, [1, s_size]) + obspacehigh = np.reshape(ob_space.high, [1, s_size]) + + newstate = next_state / obspacehigh - target_f = model.predict(state) - target_f[0][action] = target - model.fit(state, target_f, epochs=1, verbose=0) + # Train + target = reward + if not done: + target = (reward)# + 0.95 * np.amax(model.predict(next_state)[0])) + + print(target) + + target_f = model.predict(state) + target_f[0][action] = target + model.fit(state, target_f, epochs=1, verbose=0) - state = next_state - #rewardsum += reward - if epsilon > epsilon_min: epsilon *= epsilon_decay - - rewards.append(reward) - - - if args.output: - with open(args.output, 'a') as csvFile: - writer = csv.writer(csvFile) - writer.writerow([reward, action]) - csvFile.close() - - for ap in range(0, aps): - ifaceaction = int(action / (pow(numChannels, ap))) - ifaceaction = ifaceaction % numChannels - actions[ap].append(ifaceaction) - - print ("Reward: " + str(reward)) - print ("GameOver: " + str(done)) - print ("Next Channels: " + str(next_state)) - print ("Channel selection:" + str(action)) - print ("next step") - - if args.plot: - plt.subplot(211) - plt.plot(run, reward, 'bo') # Additional point - plt.ylabel('reward') - plt.subplot(212) - #for ap in range(0, aps): - # plt.plot(actions[ap]) - plt.plot(run, action, 'bo') # Additional point - plt.ylabel('action') - 
plt.xlabel('step') - plt.pause(0.05) - - run += 1 - - if args.steps and int(args.steps) < run: - os._exit(1) - - episode += 1 + state = newstate + #rewardsum += reward + if epsilon > epsilon_min: epsilon *= epsilon_decay + + rewards.append(reward) + + if args.trainingfile: + model.save_weights(args.trainingfile) + + if args.output: + with open(args.output, 'a') as csvFile: + writer = csv.writer(csvFile) + writer.writerow([reward, action, episode]) + csvFile.close() + + for ap in range(0, aps): + ifaceaction = int(action / (pow(numChannels, ap))) + ifaceaction = ifaceaction % numChannels + actions[ap].append(ifaceaction) + + print ("Reward: " + str(reward)) + print ("GameOver: " + str(done)) + print ("State: " + str(state)) + print ("Channel selection:" + str(action)) + print ("Run: " + str(run) + ", Episode: " + str(episode)) + print ("next step") + + if args.plot: + plt.subplot(211) + plt.plot(run, reward, 'bo') # Additional point + plt.ylabel('reward') + plt.subplot(212) + #for ap in range(0, aps): + # plt.plot(actions[ap]) + plt.plot(run, action, 'bo') # Additional point + plt.ylabel('action') + plt.xlabel('step') + plt.pause(0.05) + + run += 1 + + # next episode if enough steps, if enough episodes -> exit + if args.steps and int(args.steps) < run: + if args.episodes and int(args.episodes) <= episode: + os._exit(1) + else: + break + + episode += 1 ''' diff --git a/openAI_RRM/thompson_agent.py b/openAI_RRM/thompson_agent.py index 4efcba8..2434473 100644 --- a/openAI_RRM/thompson_agent.py +++ b/openAI_RRM/thompson_agent.py @@ -47,7 +47,7 @@ rew_history = [] numChannels = 2 -episode = 0 +episode = 1 while True: run = 0 @@ -67,6 +67,8 @@ avg = [] num = [] maxreward = 1 + lastreward = 0 + lastaction = 0 done = False @@ -85,7 +87,7 @@ # generate random values randval = [] for i in range(a_size): - randval.append(np.random.normal(avg[i]/maxreward, 1/(num[i] + 1), 1)) + randval.append(np.random.normal(avg[i]/maxreward, 1/(pow(num[i],2) + 1), 1)) #take index of highest 
value action = np.argmax(randval) @@ -93,6 +95,12 @@ #execute step next_state, reward, done, _ = env.step(action) + #hysteresis + if action is not lastaction and abs(reward - lastreward) < 0.1: + reward = reward * 0.75 + lastaction = action + lastreward = reward + # add reward for further execution avg[action] = (avg[action] * num[action] + reward) / (num[action] + 2) num[action] += 1 @@ -105,7 +113,7 @@ if args.output: with open(args.output, 'a') as csvFile: writer = csv.writer(csvFile) - writer.writerow([reward, action]) + writer.writerow([reward, action, episode]) csvFile.close() for ap in range(0, aps): From 8db81b87b9cb00d90c0fad1bc1629b3b7266e3bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Sat, 8 Jun 2019 17:10:45 +0200 Subject: [PATCH 39/54] debug thompson --- openAI_RRM/thompson_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openAI_RRM/thompson_agent.py b/openAI_RRM/thompson_agent.py index 2434473..e40900e 100644 --- a/openAI_RRM/thompson_agent.py +++ b/openAI_RRM/thompson_agent.py @@ -96,7 +96,7 @@ next_state, reward, done, _ = env.step(action) #hysteresis - if action is not lastaction and abs(reward - lastreward) < 0.1: + if action != lastaction and abs(reward - lastreward) < 0.1: reward = reward * 0.75 lastaction = action lastreward = reward From 0c11be87696d46bf371d62c01fa5ed532bb69d3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Sat, 8 Jun 2019 23:00:42 +0200 Subject: [PATCH 40/54] thompson back to normal, neuro normalize reward --- openAI_RRM/channel_controller.py | 2 +- openAI_RRM/rrm_agent.py | 20 +++++++++++++++----- openAI_RRM/thompson_agent.py | 2 +- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index c2590e5..7485087 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -472,7 +472,7 @@ def get_observationSpace(self): the maximum is 10 ''' #maxValues 
= [len(self.availableChannels) for i in self._create_interface_list()] - return spaces.Box(low=0, high=10, shape=(len(self._create_interface_list()),2), dtype=numpy.uint32) + return spaces.Box(low=0, high=5, shape=(len(self._create_interface_list()),2), dtype=numpy.uint32) #return spaces.MultiDiscrete(maxValues) #spaces.Box(low=0, high=10000000, shape=(len(self.observationSpace),), dtype=numpy.float32) diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index ae89402..fcdce1d 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -41,7 +41,7 @@ #env.configure() env.start_controller(steptime=float(args.steptime), config=args.config) -epsilon = 1.0 # exploration rate +epsilon_max = 1.0 # exploration rate epsilon_min = 0.01 #epsilon_decay = 0.99 epsilon_decay = 0.995 @@ -54,6 +54,7 @@ while True: state = env.reset() + n = 0 ac_space = env.action_space ob_space = env.observation_space @@ -69,6 +70,10 @@ print(s_size) + state = np.reshape(state, [1, s_size]) + obspacehigh = np.reshape(ob_space.high, [1, s_size]) + state = state *2 / obspacehigh - 1 + model = keras.Sequential() model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) model.add(keras.layers.Dense(5, activation='relu')) @@ -97,7 +102,8 @@ time.sleep(2) continue - episode = 1 + episode = epsilon_max + epsilon_max = epsilon_max * 2/3 while True: print("start episode") @@ -107,6 +113,7 @@ runs = [] rewards = [] actions = [] + maxreward = 1 aps = int(log(a_size, numChannels)) @@ -123,7 +130,9 @@ # Step next_state, reward, done, _ = env.step(action) - reward /= 1000 + maxreward = max(reward, maxreward) + + reward /= maxreward if done: # print("episode: {}/{}, time: {}, rew: {}, eps: {:.2}" @@ -133,8 +142,7 @@ next_state = np.reshape(next_state, [1, s_size]) obspacehigh = np.reshape(ob_space.high, [1, s_size]) - - newstate = next_state / obspacehigh + newstate = next_state *2 / obspacehigh - 1 # Train target = reward @@ -144,7 +152,9 @@ print(target) target_f = 
model.predict(state) + print(target_f) target_f[0][action] = target + print(target_f) model.fit(state, target_f, epochs=1, verbose=0) state = newstate diff --git a/openAI_RRM/thompson_agent.py b/openAI_RRM/thompson_agent.py index e40900e..fa0d4d1 100644 --- a/openAI_RRM/thompson_agent.py +++ b/openAI_RRM/thompson_agent.py @@ -87,7 +87,7 @@ # generate random values randval = [] for i in range(a_size): - randval.append(np.random.normal(avg[i]/maxreward, 1/(pow(num[i],2) + 1), 1)) + randval.append(np.random.normal(avg[i]/maxreward, 1/(pow(num[i],1) + 1), 1)) #take index of highest value action = np.argmax(randval) From 15c96b75dff4c091398cfc8477c8dc4307b732e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Sat, 8 Jun 2019 23:07:59 +0200 Subject: [PATCH 41/54] add client switch in config --- .../SimulationSlavesConfig/Setting3_112/config_slave.yaml | 4 ++-- .../SimulationSlavesConfig/Setting3_112/config_slave2.yaml | 4 ++-- .../SimulationSlavesConfig/Setting3_112/config_slave3.yaml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml index 7bd46d6..98921b5 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml @@ -28,8 +28,8 @@ modules: module : uniflex_module_simple class_name : SimpleModule4 devices : ['phy0'] - kwargs : { "clients" : ["cc:cc:cc:cc:cc:01"], + kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02"], 'neighbors' : ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], 'myMAC' : 'aa:aa:aa:aa:aa:01', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientconf': 
'~/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP1'}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml index 12be0a4..d5109ea 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml @@ -28,8 +28,8 @@ modules: module : uniflex_module_simple class_name : SimpleModule4 devices : ['phy0'] - kwargs : { "clients" : ["cc:cc:cc:cc:cc:11"], + kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12"], 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], 'myMAC' : 'aa:aa:aa:aa:aa:02', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '~/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP2'}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml index 427cb58..40d1d02 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml @@ -31,5 +31,5 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22"], 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:02'], 'myMAC' : 'aa:aa:aa:aa:aa:03', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientconf': '~/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP3'}} From 773eec1061935aa54fd51b1af10183d35c294ac5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Sun, 9 Jun 2019 20:20:08 +0200 
Subject: [PATCH 42/54] improve nn agent --- .../Setting3_112/config_slave.yaml | 3 +- .../Setting3_112/config_slave2.yaml | 3 +- .../Setting3_112/config_slave3.yaml | 3 +- openAI_RRM/channel_controller.py | 43 +++++++++++++-- openAI_RRM/config_master_simulation.yaml | 3 +- openAI_RRM/rrm_agent.py | 54 ++++++++++++++----- openAI_RRM/thompson_agent.py | 6 +-- 7 files changed, 91 insertions(+), 24 deletions(-) diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml index 98921b5..8374952 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml @@ -31,5 +31,6 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02"], 'neighbors' : ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], 'myMAC' : 'aa:aa:aa:aa:aa:01', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientconf': '~/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP1'}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP1' + }} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml index d5109ea..86cbb73 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml @@ -31,5 +31,6 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12"], 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], 'myMAC' : 'aa:aa:aa:aa:aa:02', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': 
'~/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP2'}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP2' + }} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml index 40d1d02..5ed28ec 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml @@ -31,5 +31,6 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22"], 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:02'], 'myMAC' : 'aa:aa:aa:aa:aa:03', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientconf': '~/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP3'}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP3' + }} diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index 7485087..2113467 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -46,6 +46,10 @@ def __init__(self,**kwargs): self.actionSet = [] self.simulation = False self.simulationsteps = None + self.aporder = None + + self.actionOrder = [] + self.observationOrder = [] if 'availableChannels' in kwargs: self.availableChannels = kwargs['availableChannels'] @@ -55,6 +59,9 @@ def __init__(self,**kwargs): if 'steptime' in kwargs: self.simulationsteptime = kwargs['steptime'] + + if 'order' in kwargs: + self.aporder = kwargs['order'] @modules.on_start() def my_start_function(self): @@ -331,7 +338,15 @@ def 
get_interfaces(self): }, ... } + + fills self.aporder. Map index in uniflex to index in order list + fills self.observationOrder. Map index in agent to index in uniflex list ''' + orphanApId = 0 + if self.aporder: + orphanApId = len(self.aporder) + + self.actionOrder = [] interfaces = {} for node in self.get_nodes(): nodeinfo = {'hostname': node.hostname, 'uuid': node.uuid} @@ -341,10 +356,27 @@ def get_interfaces(self): interfaces_tmp = [] for interface in device.get_interfaces(): interfaces_tmp.append(interface) + if self.aporder: + try: + mac = device.get_address(interface) + indexInOrder = self.aporder.index(mac) + self.actionOrder.append(indexInOrder) + except ValueError: + print("Device is unknown:" + mac) + self.actionOrder.append(orphanApId) + orphanApId += 1 + else: + self.actionOrder.append(orphanApId) + orphanApId += 1 + devinfo['interfaces'] = interfaces_tmp devices[device.uuid] = devinfo nodeinfo['devices'] = devices interfaces[node.uuid] = nodeinfo + + self.observationOrder = [] + for i in range(0, len(self.actionOrder)): + self.observationOrder.append(self.actionOrder.index(i)) return interfaces def get_channels(self): @@ -454,7 +486,8 @@ def execute_action(self, action): channel value = (action/numberOfChannels^AP_id) mod numberOfChannels ''' for index, interface in enumerate(self._create_interface_list()): - ifaceaction = int(action / (pow(len(self.availableChannels),index))) + apindex = self.actionOrder[index] + ifaceaction = int(action / (pow(len(self.availableChannels),apindex))) ifaceaction = ifaceaction % len(self.availableChannels) self.set_channel(interface['node'], interface['device'], interface['iface'], self.availableChannels[ifaceaction], None) @@ -497,9 +530,13 @@ def get_observation(self): ''' client_nums = self.get_num_clients() neighbours_nums = self.get_num_neighbours() - result = [] + resultUniflexOrder = [] for i in range(0, len(neighbours_nums)): - result.append([client_nums[i], neighbours_nums[i]]) + 
resultUniflexOrder.append([client_nums[i], neighbours_nums[i]]) + #switch order of values in list + result = [] + for i in range(0, len(resultUniflexOrder)): + result.append(resultUniflexOrder[self.observationOrder[i]]) return result # game over if there is a new interface diff --git a/openAI_RRM/config_master_simulation.yaml b/openAI_RRM/config_master_simulation.yaml index 5b017ff..6ecebde 100644 --- a/openAI_RRM/config_master_simulation.yaml +++ b/openAI_RRM/config_master_simulation.yaml @@ -19,7 +19,8 @@ control_applications: kwargs : { 'availableChannels' : [1,5], 'steptime' : 1, - 'simulation': True + 'simulation': True, + 'order': ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:02','aa:aa:aa:aa:aa:03'] } modules: diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index fcdce1d..08c136b 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -15,6 +15,12 @@ from math import * +def normalize_state(state, ob_space, s_size): + state = np.reshape(state, [1, s_size]) + obspacehigh = np.reshape(ob_space.high, [1, s_size]) + state = state *2 / obspacehigh - 1 + return state + parser = argparse.ArgumentParser(description='Uniflex reader') parser.add_argument('--config', help='path to the uniflex config file', default=None) parser.add_argument('--output', help='path to a csv file for agent output data', default=None) @@ -70,9 +76,7 @@ print(s_size) - state = np.reshape(state, [1, s_size]) - obspacehigh = np.reshape(ob_space.high, [1, s_size]) - state = state *2 / obspacehigh - 1 + state = normalize_state(state, ob_space, s_size) model = keras.Sequential() model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) @@ -95,31 +99,40 @@ except ValueError: continue rewardsum = 0 - done = False if a_size == 0: print("there is no vaild AP - sleep 2 seconds") time.sleep(2) continue - episode = epsilon_max - epsilon_max = epsilon_max * 2/3 + episode = 1 + # Schleife für Episoden while True: print("start episode") - epsilon = 1.0 run = 0 runs 
= [] rewards = [] actions = [] - maxreward = 1 + maxreward = 0.00001 + minreward = np.inf + + epsilon = epsilon_max + epsilon_max *= 0.999 + epsilon_max = pow(epsilon_max, 3) + done = False + lastreward = 0 + lastaction = 0 aps = int(log(a_size, numChannels)) for i in range(0, aps): actions.append([]) + state = env.reset() + state = normalize_state(state, ob_space, s_size) + while not done: # Choose action if np.random.rand(1) < epsilon: @@ -130,9 +143,24 @@ # Step next_state, reward, done, _ = env.step(action) - maxreward = max(reward, maxreward) + minreward = min(reward, minreward) + reward -= minreward + maxreward = max(reward, maxreward) reward /= maxreward + + #set reward to 1.0 if it is first value + if maxreward == 0.00001: + reward = 1.0 + + reward = pow(reward, 2) + + #hysteresis + if action != lastaction and abs(reward - lastreward) < 0.1: + reward *= 0.9 + lastaction = action + lastreward = reward + if done: # print("episode: {}/{}, time: {}, rew: {}, eps: {:.2}" @@ -140,9 +168,7 @@ break - next_state = np.reshape(next_state, [1, s_size]) - obspacehigh = np.reshape(ob_space.high, [1, s_size]) - newstate = next_state *2 / obspacehigh - 1 + next_state = normalize_state(next_state, ob_space, s_size) # Train target = reward @@ -157,7 +183,7 @@ print(target_f) model.fit(state, target_f, epochs=1, verbose=0) - state = newstate + state = next_state #rewardsum += reward if epsilon > epsilon_min: epsilon *= epsilon_decay @@ -199,7 +225,7 @@ run += 1 # next episode if enough steps, if enough episodes -> exit - if args.steps and int(args.steps) < run: + if args.steps and int(args.steps) <= run: if args.episodes and int(args.episodes) <= episode: os._exit(1) else: diff --git a/openAI_RRM/thompson_agent.py b/openAI_RRM/thompson_agent.py index fa0d4d1..68ba3e7 100644 --- a/openAI_RRM/thompson_agent.py +++ b/openAI_RRM/thompson_agent.py @@ -140,11 +140,11 @@ plt.xlabel('step') plt.pause(0.05) - if args.steps and int(args.steps) < run: - os._exit(1) - run += 1 + if 
args.steps and int(args.steps) <= run: + os._exit(1) + episode += 1 From a5e4d838cf79cedd991debc9a4968ee0b73b1a9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Tue, 18 Jun 2019 16:15:14 +0200 Subject: [PATCH 43/54] improve rrm agent --- openAI_RRM/channel_controller.py | 5 +- openAI_RRM/config_master_simulation.yaml | 2 +- openAI_RRM/rrm_agent.py | 75 +++++++++++++++++------- 3 files changed, 57 insertions(+), 25 deletions(-) diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index 2113467..a4abd07 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -358,7 +358,7 @@ def get_interfaces(self): interfaces_tmp.append(interface) if self.aporder: try: - mac = device.get_address(interface) + mac = device.getHwAddr(interface) indexInOrder = self.aporder.index(mac) self.actionOrder.append(indexInOrder) except ValueError: @@ -375,6 +375,7 @@ def get_interfaces(self): interfaces[node.uuid] = nodeinfo self.observationOrder = [] + print(self.actionOrder) for i in range(0, len(self.actionOrder)): self.observationOrder.append(self.actionOrder.index(i)) return interfaces @@ -425,7 +426,7 @@ def simulate_flows(self): chnum = device.get_channel(interface) chw = device.get_channel_width(interface) infos = device.get_info_of_connected_devices(interface) - mac = device.get_address() + mac = device.getHwAddr() flows.append({'mac address' : mac, 'channel number' : chnum, 'channel width' : chw, 'iface': interface}) diff --git a/openAI_RRM/config_master_simulation.yaml b/openAI_RRM/config_master_simulation.yaml index 6ecebde..370b7fa 100644 --- a/openAI_RRM/config_master_simulation.yaml +++ b/openAI_RRM/config_master_simulation.yaml @@ -20,7 +20,7 @@ control_applications: 'availableChannels' : [1,5], 'steptime' : 1, 'simulation': True, - 'order': ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:02','aa:aa:aa:aa:aa:03'] + 'order': ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:02','aa:aa:aa:aa:aa:03', 'aa:aa:aa:aa:aa:04', 
'aa:aa:aa:aa:aa:05','aa:aa:aa:aa:aa:06', 'aa:aa:aa:aa:aa:07','aa:aa:aa:aa:aa:08'] } modules: diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index 08c136b..e3d9a79 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -13,7 +13,10 @@ import csv import os from math import * +from scipy.optimize import fsolve +AVGTIME_ONEVALUE_RAND = 20 +RANDVALUE_FIRST_EPISODE = 0.55 #60% def normalize_state(state, ob_space, s_size): state = np.reshape(state, [1, s_size]) @@ -21,13 +24,24 @@ def normalize_state(state, ob_space, s_size): state = state *2 / obspacehigh - 1 return state +def guess_random_numbers_in_firstEpisode(a_size): + return AVGTIME_ONEVALUE_RAND * a_size + +def guess_steps(a_size): + return guess_random_numbers_in_firstEpisode(a_size) / RANDVALUE_FIRST_EPISODE + +def guess_epsilon_decay(steps, a_size): + func = lambda epsilon_decay: guess_random_numbers_in_firstEpisode(a_size) - (1-epsilon_decay**(steps + 1)) / (1 - epsilon_decay) + return fsolve(func, 0.9999999999)[0] + parser = argparse.ArgumentParser(description='Uniflex reader') parser.add_argument('--config', help='path to the uniflex config file', default=None) parser.add_argument('--output', help='path to a csv file for agent output data', default=None) parser.add_argument('--plot', help='activate plotting', default=None) parser.add_argument('--steptime', help='interval between two steps', default=1) -parser.add_argument('--steps', help='number of steps per episode. If not set, the agents runs infinitly long', default=None) -parser.add_argument('--episodes', help='number of episodes in this execution. If not set, the agents runs infinitly long', default=1) +#parser.add_argument('--steps', help='number of steps per episode. If not set, the agents runs infinitly long', default=None) +parser.add_argument('--episodes', help='number of episodes in this execution. 
If not set, the agents runs infinitly long', default=None) +parser.add_argument('--startepisode', help='The episode we start with', default=1) parser.add_argument('--trainingfile', help='file to load and store training data', default=None) args = parser.parse_args() @@ -42,6 +56,8 @@ def normalize_state(state, ob_space, s_size): if args.plot: import matplotlib.pyplot as plt +print("Start at episode " + str(args.startepisode)) + #create uniflex environment, steptime is 10sec env = gym.make('uniflex-v0') #env.configure() @@ -74,7 +90,7 @@ def normalize_state(state, ob_space, s_size): #s_size = list(map(lambda x: x * ob_space.high, s_size)) a_size = ac_space.n - print(s_size) + print("observation_space size:" + str(s_size)) state = normalize_state(state, ob_space, s_size) @@ -92,8 +108,10 @@ def normalize_state(state, ob_space, s_size): print("Load model") except ValueError: print("Spaces does not match") + except tf.errors.NotFoundError: + print("File not found. Skip loading") - print(state) + print("State (Observation) of System" + str(state)) try: state = np.reshape(state, [1, s_size]) except ValueError: @@ -105,7 +123,21 @@ def normalize_state(state, ob_space, s_size): time.sleep(2) continue + steps = guess_steps(a_size) + epsilon_decay = guess_epsilon_decay(steps, a_size) + print("Initialize agent. 
Exploration rate is " + str(epsilon_decay) + + ", an episode has at most " + str(steps) + " steps") + + rewardpow = int(log(a_size, 2)) + episode = 1 + maxreward = 0.00001 + minreward = np.inf + + while episode < int(args.startepisode): + epsilon_max *= 0.999 + epsilon_max = max(pow(epsilon_max, 3), epsilon_min) + episode += 1 # Schleife für Episoden while True: @@ -115,20 +147,18 @@ def normalize_state(state, ob_space, s_size): runs = [] rewards = [] actions = [] - maxreward = 0.00001 - minreward = np.inf epsilon = epsilon_max epsilon_max *= 0.999 - epsilon_max = pow(epsilon_max, 3) + epsilon_max = max(pow(epsilon_max, 3), epsilon_min) done = False lastreward = 0 lastaction = 0 aps = int(log(a_size, numChannels)) - for i in range(0, aps): - actions.append([]) + #for i in range(0, aps): + # actions.append([]) state = env.reset() state = normalize_state(state, ob_space, s_size) @@ -153,7 +183,7 @@ def normalize_state(state, ob_space, s_size): if maxreward == 0.00001: reward = 1.0 - reward = pow(reward, 2) + reward = pow(reward, rewardpow) #hysteresis if action != lastaction and abs(reward - lastreward) < 0.1: @@ -165,6 +195,8 @@ def normalize_state(state, ob_space, s_size): if done: # print("episode: {}/{}, time: {}, rew: {}, eps: {:.2}" # .format(e, total_episodes, time, rewardsum, epsilon)) + maxreward = 0.00001 + minreward = np.inf break @@ -175,22 +207,19 @@ def normalize_state(state, ob_space, s_size): if not done: target = (reward)# + 0.95 * np.amax(model.predict(next_state)[0])) - print(target) + print("Scaled reward: " + str(target)) target_f = model.predict(state) - print(target_f) + print("agent learning" + str(target_f)) target_f[0][action] = target - print(target_f) + print("agent new learning" + str(target_f)) model.fit(state, target_f, epochs=1, verbose=0) state = next_state #rewardsum += reward if epsilon > epsilon_min: epsilon *= epsilon_decay - rewards.append(reward) - - if args.trainingfile: - model.save_weights(args.trainingfile) + 
#rewards.append(reward) if args.output: with open(args.output, 'a') as csvFile: @@ -198,10 +227,10 @@ def normalize_state(state, ob_space, s_size): writer.writerow([reward, action, episode]) csvFile.close() - for ap in range(0, aps): - ifaceaction = int(action / (pow(numChannels, ap))) - ifaceaction = ifaceaction % numChannels - actions[ap].append(ifaceaction) + #for ap in range(0, aps): + # ifaceaction = int(action / (pow(numChannels, ap))) + # ifaceaction = ifaceaction % numChannels + # actions[ap].append(ifaceaction) print ("Reward: " + str(reward)) print ("GameOver: " + str(done)) @@ -225,7 +254,9 @@ def normalize_state(state, ob_space, s_size): run += 1 # next episode if enough steps, if enough episodes -> exit - if args.steps and int(args.steps) <= run: + if steps <= run: + if args.trainingfile: + model.save_weights(args.trainingfile) if args.episodes and int(args.episodes) <= episode: os._exit(1) else: From 139a01068879776fae8515d69556979b9e209d2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Wed, 19 Jun 2019 16:49:57 +0200 Subject: [PATCH 44/54] implement round robin training mode without debug, enable mode for setting 112 --- .../Setting3_112/config_slave.yaml | 4 +- .../Setting3_112/config_slave2.yaml | 4 +- .../Setting3_112/config_slave3.yaml | 4 +- openAI_RRM/channel_controller.py | 39 +++++++++++++++++-- openAI_RRM/config_master_simulation.yaml | 2 + openAI_RRM/rrm_agent.py | 32 ++++++++++++--- 6 files changed, 73 insertions(+), 12 deletions(-) diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml index 8374952..2d913ba 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml @@ -28,7 +28,9 @@ modules: module : uniflex_module_simple class_name : SimpleModule4 devices : ['phy0'] - kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02"], + 
kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02", "cc:cc:cc:cc:cc:03", "cc:cc:cc:cc:cc:04", "cc:cc:cc:cc:cc:05", "cc:cc:cc:cc:cc:06"], + 'mode': 'training', # training or working + 'numsClients': [6,1,2,1,1], 'neighbors' : ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], 'myMAC' : 'aa:aa:aa:aa:aa:01', 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP1' diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml index 86cbb73..9282f13 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml @@ -28,7 +28,9 @@ modules: module : uniflex_module_simple class_name : SimpleModule4 devices : ['phy0'] - kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12"], + kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12", "cc:cc:cc:cc:cc:13", "cc:cc:cc:cc:cc:14", "cc:cc:cc:cc:cc:15", "cc:cc:cc:cc:cc:16"], + 'mode': 'training', # training or working + 'numsClients': [0,1,2,1,2], 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], 'myMAC' : 'aa:aa:aa:aa:aa:02', 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP2' diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml index 5ed28ec..cd3dd94 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml @@ -28,7 +28,9 @@ modules: module : uniflex_module_simple class_name : SimpleModule4 devices : 
['phy0'] - kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22"], + kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22", "cc:cc:cc:cc:cc:23", "cc:cc:cc:cc:cc:24", "cc:cc:cc:cc:cc:25", "cc:cc:cc:cc:cc:26"], + 'mode': 'training', # training or working + 'numsClients': [0,5,2,2,3], 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:02'], 'myMAC' : 'aa:aa:aa:aa:aa:03', 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP3' diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index a4abd07..7601ed2 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -47,6 +47,10 @@ def __init__(self,**kwargs): self.simulation = False self.simulationsteps = None self.aporder = None + self.mode = "" + self.scenarios = 1 + self.currentScenario = 0 + self.actionOrder = [] self.observationOrder = [] @@ -62,6 +66,12 @@ def __init__(self,**kwargs): if 'order' in kwargs: self.aporder = kwargs['order'] + + if 'mode' in kwargs: + self.mode = kwargs['mode'] + + if 'scenarios' in kwargs: + self.scenarios = kwargs['scenarios'] @modules.on_start() def my_start_function(self): @@ -434,7 +444,7 @@ def simulate_flows(self): for node in self.get_nodes(): for device in node.get_devices(): for interface in device.get_interfaces(): - device.set_packet_counter(flows, interface, self.simulationsteptime) + device.set_packet_counter(flows, interface, self.simulationsteptime, self.currentScenario) @modules.on_event(PeriodicEvaluationTimeEvent) def periodic_evaluation(self, event): @@ -462,6 +472,15 @@ def reset(self): self.observationSpace = self.get_observationSpace() self.actionSpace = self.get_actionSpace() self.actionSet = [] + self.currentScenario = 0 + + self.observations = [] + for obs in range(self.scenarios): + obsElem = [] + neighbours_nums = 
self.get_num_neighbours() + for i in range(0, len(neighbours_nums)): + obsElem.append([0, 0]) + self.observations.append(obsElem) interfaces = self.get_interfaces() @@ -488,8 +507,9 @@ def execute_action(self, action): ''' for index, interface in enumerate(self._create_interface_list()): apindex = self.actionOrder[index] - ifaceaction = int(action / (pow(len(self.availableChannels),apindex))) - ifaceaction = ifaceaction % len(self.availableChannels) + ifaceaction = action[apindex] + #ifaceaction = int(action / (pow(len(self.availableChannels),apindex))) + #ifaceaction = ifaceaction % len(self.availableChannels) self.set_channel(interface['node'], interface['device'], interface['iface'], self.availableChannels[ifaceaction], None) return @@ -523,7 +543,9 @@ def get_actionSpace(self): self.log.info(str(key) + ":" + interface['device']) if len(interfaceList) == 0: return spaces.Discrete(0) - return spaces.Discrete(pow(len(self.availableChannels), len(interfaceList))) + maxValues = [len(self.availableChannels) for i in self._create_interface_list()] + return spaces.MultiDiscrete(maxValues) + #([ 5, 2, 2 ])(pow(len(self.availableChannels), len(interfaceList))) def get_observation(self): ''' @@ -538,6 +560,15 @@ def get_observation(self): result = [] for i in range(0, len(resultUniflexOrder)): result.append(resultUniflexOrder[self.observationOrder[i]]) + + if self.mode == "training": + #store observation for next time with this scenario + self.observations[self.currentScenario] = result + #load observation for next scenario + self.currentScenario += 1 + if self.currentScenario >= self.scenarios: + self.currentScenario = 0 + result = self.observations[self.currentScenario] return result # game over if there is a new interface diff --git a/openAI_RRM/config_master_simulation.yaml b/openAI_RRM/config_master_simulation.yaml index 370b7fa..90e51d2 100644 --- a/openAI_RRM/config_master_simulation.yaml +++ b/openAI_RRM/config_master_simulation.yaml @@ -20,6 +20,8 @@ 
control_applications: 'availableChannels' : [1,5], 'steptime' : 1, 'simulation': True, + 'mode': 'training', # training or working + 'scenarios': 5, 'order': ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:02','aa:aa:aa:aa:aa:03', 'aa:aa:aa:aa:aa:04', 'aa:aa:aa:aa:aa:05','aa:aa:aa:aa:aa:06', 'aa:aa:aa:aa:aa:07','aa:aa:aa:aa:aa:08'] } diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index e3d9a79..99fd699 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -16,12 +16,23 @@ from scipy.optimize import fsolve AVGTIME_ONEVALUE_RAND = 20 -RANDVALUE_FIRST_EPISODE = 0.55 #60% +RANDVALUE_FIRST_EPISODE = 0.55 +REWARD_INIT = 0.00001 +sortedIndecies = [] +ac_space = [] def normalize_state(state, ob_space, s_size): state = np.reshape(state, [1, s_size]) obspacehigh = np.reshape(ob_space.high, [1, s_size]) state = state *2 / obspacehigh - 1 + + #sort states + index = np.arange(state.shape[0]) + index = index.reshape((-1,1)) + state = np.insert(state, -1, index, axis=1) + state = np.sort(state.view('i8,i8,i8'), order=['f0', 'f1'], axis=0).view(np.int) + sortedIndecies = state[:,-1] + state = np.delete(state, -1, axis=1) return state def guess_random_numbers_in_firstEpisode(a_size): @@ -34,6 +45,15 @@ def guess_epsilon_decay(steps, a_size): func = lambda epsilon_decay: guess_random_numbers_in_firstEpisode(a_size) - (1-epsilon_decay**(steps + 1)) / (1 - epsilon_decay) return fsolve(func, 0.9999999999)[0] +def map_action(mappedAction): + action = np.zeros(len(mappedAction)) + for index in range(len(ac_space.nvec)): + # filter action by the index + ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) + ifaceaction = ifaceaction % ac_space.nvec[0] + action[np.where(action == index)[0]] = ifaceaction + return action + parser = argparse.ArgumentParser(description='Uniflex reader') parser.add_argument('--config', help='path to the uniflex config file', default=None) parser.add_argument('--output', help='path to a csv file for agent output data', 
default=None) @@ -88,7 +108,7 @@ def guess_epsilon_decay(steps, a_size): tmps_size = ob_space.shape s_size = tmps_size[0] * tmps_size[1] #s_size = list(map(lambda x: x * ob_space.high, s_size)) - a_size = ac_space.n + a_size = pow(ac_space.nvec[0], ac_space.nvec.shape[0]) print("observation_space size:" + str(s_size)) @@ -131,7 +151,7 @@ def guess_epsilon_decay(steps, a_size): rewardpow = int(log(a_size, 2)) episode = 1 - maxreward = 0.00001 + maxreward = REWARD_INIT minreward = np.inf while episode < int(args.startepisode): @@ -170,6 +190,8 @@ def guess_epsilon_decay(steps, a_size): else: action = np.argmax(model.predict(state)[0]) + action = map_action(action) + # Step next_state, reward, done, _ = env.step(action) @@ -180,7 +202,7 @@ def guess_epsilon_decay(steps, a_size): reward /= maxreward #set reward to 1.0 if it is first value - if maxreward == 0.00001: + if maxreward == REWARD_INIT: reward = 1.0 reward = pow(reward, rewardpow) @@ -195,7 +217,7 @@ def guess_epsilon_decay(steps, a_size): if done: # print("episode: {}/{}, time: {}, rew: {}, eps: {:.2}" # .format(e, total_episodes, time, rewardsum, epsilon)) - maxreward = 0.00001 + maxreward = REWARD_INIT minreward = np.inf break From 67cf4c7a2c9135ddb9e482c8f1cdd989aaff2bf1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Mon, 15 Jul 2019 13:35:45 +0200 Subject: [PATCH 45/54] static various setups --- .../Setting3_112/config_slave.yaml | 8 +- .../Setting3_112/config_slave2.yaml | 10 +- .../Setting3_112/config_slave3.yaml | 10 +- .../Setting3_222/config_slave.yaml | 8 +- .../Setting3_222/config_slave2.yaml | 9 +- .../Setting3_222/config_slave3.yaml | 9 +- .../Setting3_varSet/common.py | 28 ++ .../Setting3_varSet/config_slave.yaml | 41 +++ .../Setting3_varSet/config_slave2.yaml | 41 +++ .../Setting3_varSet/config_slave3.yaml | 41 +++ .../Setting3_varSet/my_filter.py | 53 ++++ .../Setting3_varSet/readme.txt | 15 + .../Setting3_varSetsmall/common.py | 28 ++ 
.../Setting3_varSetsmall/config_slave.yaml | 38 +++ .../Setting3_varSetsmall/config_slave2.yaml | 38 +++ .../Setting3_varSetsmall/config_slave3.yaml | 38 +++ .../Setting3_varSetsmall/my_filter.py | 53 ++++ .../Setting3_varSetsmall/readme.txt | 15 + openAI_RRM/channel_controller.py | 61 +++- openAI_RRM/rrm_agent.py | 171 ++++++++--- .../rrm_agent_evalmodel1_cliap-apcli.py | 264 ++++++++++++++++ openAI_RRM/rrm_agent_evalmodel2_sameset112.py | 279 +++++++++++++++++ ...rrm_agent_evalmodel3_difset_sort_cli-ap.py | 284 +++++++++++++++++ .../rrm_agent_evalmodel4_difset_unsorted.py | 287 +++++++++++++++++ ...valmodel5_difset_sort_cli-ap_detecttopo.py | 290 ++++++++++++++++++ 25 files changed, 2029 insertions(+), 90 deletions(-) create mode 100755 openAI_RRM/SimulationSlavesConfig/Setting3_varSet/common.py create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave.yaml create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave2.yaml create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave3.yaml create mode 100755 openAI_RRM/SimulationSlavesConfig/Setting3_varSet/my_filter.py create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting3_varSet/readme.txt create mode 100755 openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/common.py create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave.yaml create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave2.yaml create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave3.yaml create mode 100755 openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/my_filter.py create mode 100644 openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/readme.txt create mode 100644 openAI_RRM/rrm_agent_evalmodel1_cliap-apcli.py create mode 100644 openAI_RRM/rrm_agent_evalmodel2_sameset112.py create mode 100644 openAI_RRM/rrm_agent_evalmodel3_difset_sort_cli-ap.py create mode 100644 
openAI_RRM/rrm_agent_evalmodel4_difset_unsorted.py create mode 100644 openAI_RRM/rrm_agent_evalmodel5_difset_sort_cli-ap_detecttopo.py diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml index 2d913ba..a326d49 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml @@ -29,10 +29,10 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02", "cc:cc:cc:cc:cc:03", "cc:cc:cc:cc:cc:04", "cc:cc:cc:cc:cc:05", "cc:cc:cc:cc:cc:06"], - 'mode': 'training', # training or working - 'numsClients': [6,1,2,1,1], - 'neighbors' : ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], + 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03']], 'myMAC' : 'aa:aa:aa:aa:aa:01', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP1' + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP1', + 'mode': 'training', # training or working + 'numsClients': [6,1,2,1,1] }} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml index 9282f13..06bbaed 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml @@ -28,11 +28,11 @@ modules: module : uniflex_module_simple 
class_name : SimpleModule4 devices : ['phy0'] - kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12", "cc:cc:cc:cc:cc:13", "cc:cc:cc:cc:cc:14", "cc:cc:cc:cc:cc:15", "cc:cc:cc:cc:cc:16"], - 'mode': 'training', # training or working - 'numsClients': [0,1,2,1,2], - 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], + kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12", "cc:cc:cc:cc:cc:13", "cc:cc:cc:cc:cc:14", "cc:cc:cc:cc:cc:15", "cc:cc:cc:cc:cc:16"], + 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03']], 'myMAC' : 'aa:aa:aa:aa:aa:02', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP2' + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP2', + 'mode': 'training', # training or working + 'numsClients': [0,1,2,1,2] }} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml index cd3dd94..a903689 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml @@ -28,11 +28,11 @@ modules: module : uniflex_module_simple class_name : SimpleModule4 devices : ['phy0'] - kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22", "cc:cc:cc:cc:cc:23", "cc:cc:cc:cc:cc:24", "cc:cc:cc:cc:cc:25", "cc:cc:cc:cc:cc:26"], - 'mode': 'training', # training or working - 'numsClients': [0,5,2,2,3], - 'neighbors' : ['aa:aa:aa:aa:aa:01', 
'aa:aa:aa:aa:aa:02'], + kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22", "cc:cc:cc:cc:cc:23", "cc:cc:cc:cc:cc:24", "cc:cc:cc:cc:cc:25", "cc:cc:cc:cc:cc:26"], + 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03']], 'myMAC' : 'aa:aa:aa:aa:aa:03', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP3' + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP3', + 'mode': 'training', # training or working + 'numsClients': [0,5,2,2,3] }} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml index 1c7c647..b494c57 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml @@ -28,8 +28,10 @@ modules: module : uniflex_module_simple class_name : SimpleModule4 devices : ['phy0'] - kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02"], - 'neighbors' : ['aa:aa:aa:aa:aa:02'], + kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02", "cc:cc:cc:cc:cc:03", "cc:cc:cc:cc:cc:04", "cc:cc:cc:cc:cc:05", "cc:cc:cc:cc:cc:06"], + 'neighbors' : [['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02']], 'myMAC' : 'aa:aa:aa:aa:aa:01', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + 'simulation': {'channelSwitchingTime': 0, 
'channelThroughputDefault':54000000, 'txBytesRandom':0, + 'mode': 'training', # training or working + 'numsClients': [1,1,5,1,2]}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml index 4f59241..e6ad37c 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml @@ -28,8 +28,11 @@ modules: module : uniflex_module_simple class_name : SimpleModule4 devices : ['phy0'] - kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12"], - 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], + kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12", "cc:cc:cc:cc:cc:13", "cc:cc:cc:cc:cc:14", "cc:cc:cc:cc:cc:15", "cc:cc:cc:cc:cc:16"], + 'neighbors' : [['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03']], 'myMAC' : 'aa:aa:aa:aa:aa:02', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, + 'mode': 'training', # training or working + 'numsClients': [5,2,1,1,2] + }} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml index ea0624c..45bf91c 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml @@ -28,8 +28,11 @@ modules: module : uniflex_module_simple class_name : SimpleModule4 devices : ['phy0'] - kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22"], - 'neighbors' : ['aa:aa:aa:aa:aa:02'], + kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22", 
"cc:cc:cc:cc:cc:23", "cc:cc:cc:cc:cc:24", "cc:cc:cc:cc:cc:25", "cc:cc:cc:cc:cc:26"], + 'neighbors' : [['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02']], 'myMAC' : 'aa:aa:aa:aa:aa:03', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, + 'mode': 'training', # training or working + 'numsClients': [1,3,5,4,2] + }} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/common.py b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/common.py new file mode 100755 index 0000000..370d25f --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/common.py @@ -0,0 +1,28 @@ +from uniflex.core import events + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class AveragedSpectrumScanSampleEvent(events.EventBase): + def __init__(self, avg): + super().__init__() + self.avg = avg + + +class StartMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class StopMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class ChangeWindowSizeEvent(events.EventBase): + def __init__(self, value): + super().__init__() + self.window = value diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave.yaml new file mode 100644 index 0000000..3f02d06 --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave.yaml @@ -0,0 +1,41 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" + 
+control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02", "cc:cc:cc:cc:cc:03", "cc:cc:cc:cc:cc:04", "cc:cc:cc:cc:cc:05", "cc:cc:cc:cc:cc:06"], + 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:03']], + 'myMAC' : 'aa:aa:aa:aa:aa:01', + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP1', + 'mode': 'generator', # training or working or generator + 'numsClients': [1,1,5,2,4], + #for generator + 'maxNumClients' : 100, + 'scenariosPerAPSetting': 40 + }} + diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave2.yaml new file mode 100644 index 0000000..3aeb32e --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave2.yaml @@ -0,0 +1,41 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + 
class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12", "cc:cc:cc:cc:cc:13", "cc:cc:cc:cc:cc:14", "cc:cc:cc:cc:cc:15", "cc:cc:cc:cc:cc:16"], + 'neighbors' : [['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], []], + 'myMAC' : 'aa:aa:aa:aa:aa:02', + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP2', + 'mode': 'generator', # training or working or generator + 'numsClients': [1,3,1,4,3], + #for generator + 'maxNumClients' : 100, + 'scenariosPerAPSetting': 40 + }} + diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave3.yaml new file mode 100644 index 0000000..21dc9a2 --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave3.yaml @@ -0,0 +1,41 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22", "cc:cc:cc:cc:cc:23", "cc:cc:cc:cc:cc:24", "cc:cc:cc:cc:cc:25", "cc:cc:cc:cc:cc:26"], + 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:01'], ['aa:aa:aa:aa:aa:02', 
'aa:aa:aa:aa:aa:01'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:01']], + 'myMAC' : 'aa:aa:aa:aa:aa:03', + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP3', + 'mode': 'generator', # training or working or generator + 'numsClients': [5,2,3,2,2], + #for generator + 'maxNumClients' : 100, + 'scenariosPerAPSetting': 40 + }} + diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/my_filter.py b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/my_filter.py new file mode 100755 index 0000000..59d59a2 --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/my_filter.py @@ -0,0 +1,53 @@ +import logging +from uniflex.core import modules +from sbi.radio_device.events import SpectralScanSampleEvent +from common import AveragedSpectrumScanSampleEvent +from common import ChangeWindowSizeEvent + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class MyAvgFilter(modules.ControlApplication): + def __init__(self, window): + super(MyAvgFilter, self).__init__() + self.log = logging.getLogger('MyFilter') + self.window = window + self.samples = [] + + @modules.on_event(ChangeWindowSizeEvent) + def change_window_size(self, event): + self.log.info("New window size: {}".format(event.window)) + self.window = event.window + + def change_window_size_func(self, newWindow): + self.log.info("New window size: {}".format(newWindow)) + self.window = newWindow + + def get_window_size(self): + return self.window + + @modules.on_event(SpectralScanSampleEvent) + def serve_spectral_scan_sample(self, event): + sample = event.sample + node = event.node + device = event.device + self.log.debug("New SpectralScan Sample:{} from node {}, device: {}" + .format(sample, 
node, device)) + + self.samples.append(sample) + + if len(self.samples) == self.window: + s = sum(self.samples) + self.samples.pop(0) + avg = s / self.window + self.log.debug("Calculated average: {}".format(avg)) + event = AveragedSpectrumScanSampleEvent(avg) + self.send_event(event) + + def add_two(self, value): + value1 = value + 2 + value2 = value * 2 + return [value1, value2] diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/readme.txt b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/readme.txt new file mode 100644 index 0000000..5a22888 --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/readme.txt @@ -0,0 +1,15 @@ +# 1. Run control program and all modules on local node +uniflex-agent --config ./config_local.yaml + +source ~/Uniflex/dev/bin/activate + +# 2a. Run control program in master node: +uniflex-broker +# 2b. Run control program in master node: +python3 rrm_agent.py --config ./config_master.yaml +# 2c. Run modules in slave node: +uniflex-agent --config ./config_slave.yaml +uniflex-agent --config ./config_slave2.yaml +uniflex-agent --config ./config_slave3.yaml + +# For debugging mode run with -v option diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/common.py b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/common.py new file mode 100755 index 0000000..370d25f --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/common.py @@ -0,0 +1,28 @@ +from uniflex.core import events + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class AveragedSpectrumScanSampleEvent(events.EventBase): + def __init__(self, avg): + super().__init__() + self.avg = avg + + +class StartMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class StopMyFilterEvent(events.EventBase): + def __init__(self): + super().__init__() + + +class 
ChangeWindowSizeEvent(events.EventBase): + def __init__(self, value): + super().__init__() + self.window = value diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave.yaml new file mode 100644 index 0000000..03b4660 --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave.yaml @@ -0,0 +1,38 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02", "cc:cc:cc:cc:cc:03", "cc:cc:cc:cc:cc:04", "cc:cc:cc:cc:cc:05", "cc:cc:cc:cc:cc:06"], + 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:03']], + 'myMAC' : 'aa:aa:aa:aa:aa:01', + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP1', + 'mode': 'training', # training or working + 'numsClients': [1,1,1,1,1] + }} + diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave2.yaml new file mode 100644 index 0000000..cdc9e32 --- /dev/null +++ 
b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave2.yaml @@ -0,0 +1,38 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" + +control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12", "cc:cc:cc:cc:cc:13", "cc:cc:cc:cc:cc:14", "cc:cc:cc:cc:cc:15", "cc:cc:cc:cc:cc:16"], + 'neighbors' : [['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], []], + 'myMAC' : 'aa:aa:aa:aa:aa:02', + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP2', + 'mode': 'training', # training or working + 'numsClients': [1,1,1,1,1] + }} + diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave3.yaml new file mode 100644 index 0000000..a6b4147 --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave3.yaml @@ -0,0 +1,38 @@ +## UniFlex Agent config file + +config: + name: 'HC node' + info: 'filter runs on local node' + iface: 'lo' + iface: 'lo' + sub: "tcp://127.0.0.1:8990" + pub: "tcp://127.0.0.1:8989" + +broker: + xsub: "tcp://127.0.0.1:8990" + xpub: "tcp://127.0.0.1:8989" + 
+control_applications: + myFilter: + file : my_filter.py + class_name : MyAvgFilter + kwargs : {"window": 5} + + discovery: + module : uniflex_app_discovery_pyre + class_name : PyreDiscoverySlaveModule + kwargs: {"iface":"lo", "groupName":"uniflex_1234"} + +modules: + simple: + module : uniflex_module_simple + class_name : SimpleModule4 + devices : ['phy0'] + kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22", "cc:cc:cc:cc:cc:23", "cc:cc:cc:cc:cc:24", "cc:cc:cc:cc:cc:25", "cc:cc:cc:cc:cc:26"], + 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:01'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:01'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:01']], + 'myMAC' : 'aa:aa:aa:aa:aa:03', + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP3', + 'mode': 'training', # training or working + 'numsClients': [2,2,1,1,1] + }} + diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/my_filter.py b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/my_filter.py new file mode 100755 index 0000000..59d59a2 --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/my_filter.py @@ -0,0 +1,53 @@ +import logging +from uniflex.core import modules +from sbi.radio_device.events import SpectralScanSampleEvent +from common import AveragedSpectrumScanSampleEvent +from common import ChangeWindowSizeEvent + +__author__ = "Piotr Gawlowicz" +__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" +__version__ = "0.1.0" +__email__ = "{gawlowicz}@tkn.tu-berlin.de" + + +class MyAvgFilter(modules.ControlApplication): + def __init__(self, window): + super(MyAvgFilter, self).__init__() + self.log = logging.getLogger('MyFilter') + self.window = window + self.samples = [] + + @modules.on_event(ChangeWindowSizeEvent) + def change_window_size(self, event): + 
self.log.info("New window size: {}".format(event.window)) + self.window = event.window + + def change_window_size_func(self, newWindow): + self.log.info("New window size: {}".format(newWindow)) + self.window = newWindow + + def get_window_size(self): + return self.window + + @modules.on_event(SpectralScanSampleEvent) + def serve_spectral_scan_sample(self, event): + sample = event.sample + node = event.node + device = event.device + self.log.debug("New SpectralScan Sample:{} from node {}, device: {}" + .format(sample, node, device)) + + self.samples.append(sample) + + if len(self.samples) == self.window: + s = sum(self.samples) + self.samples.pop(0) + avg = s / self.window + self.log.debug("Calculated average: {}".format(avg)) + event = AveragedSpectrumScanSampleEvent(avg) + self.send_event(event) + + def add_two(self, value): + value1 = value + 2 + value2 = value * 2 + return [value1, value2] diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/readme.txt b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/readme.txt new file mode 100644 index 0000000..5a22888 --- /dev/null +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/readme.txt @@ -0,0 +1,15 @@ +# 1. Run control program and all modules on local node +uniflex-agent --config ./config_local.yaml + +source ~/Uniflex/dev/bin/activate + +# 2a. Run control program in master node: +uniflex-broker +# 2b. Run control program in master node: +python3 rrm_agent.py --config ./config_master.yaml +# 2c. 
Run modules in slave node: +uniflex-agent --config ./config_slave.yaml +uniflex-agent --config ./config_slave2.yaml +uniflex-agent --config ./config_slave3.yaml + +# For debugging mode run with -v option diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index 7601ed2..c22312c 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -41,7 +41,6 @@ def __init__(self,**kwargs): self.channel = 1 self.availableChannels = [] self.observationSpace = [] - self.registeredClients = self._create_client_list() self.lastObservation = [] self.actionSet = [] self.simulation = False @@ -55,6 +54,8 @@ def __init__(self,**kwargs): self.actionOrder = [] self.observationOrder = [] + self.registeredClients = self._create_interface_list() + if 'availableChannels' in kwargs: self.availableChannels = kwargs['availableChannels'] @@ -212,7 +213,7 @@ def get_num_clients(self): client_nums.append(len(infos)) return client_nums - def get_num_neighbours(self): + def get_num_current_neighbours(self): ''' Returns a list of numbers of neighbours of each ap ''' @@ -223,6 +224,18 @@ def get_num_neighbours(self): infos = device.get_current_neighbours(interface) neighbours.append(len(infos)) return neighbours + + def get_num_neighbours(self): + ''' + Returns a list of numbers of neighbours of each ap + ''' + neighbours = [] + for node in self.get_nodes(): + for device in node.get_devices(): + for interface in device.get_interfaces(): + infos = device.get_neighbours(interface) + neighbours.append(len(infos)) + return neighbours def get_bandwidth(self): ''' @@ -250,8 +263,10 @@ def get_bandwidth(self): for device in node.get_devices(): if type(device.my_control_flow) is not list: device.my_control_flow = [] + for i in range(self.scenarios): + device.my_control_flow.append([]) - for flow in device.my_control_flow: + for flow in device.my_control_flow[self.currentScenario]: flow['old'] = True for interface in device.get_interfaces(): @@ -261,12 
+276,13 @@ def get_bandwidth(self): values = infos[mac] newTxBytes = int(values['tx bytes'][0]) - flow = [d for d in device.my_control_flow if d['mac address'] == mac] + flow = [d for d in device.my_control_flow[self.currentScenario] if d['mac address'] == mac] if len(flow) > 0: flow = flow[0] dif = datetime.datetime.now() - flow['last update'] tmpBandwidth = (newTxBytes - flow['tx bytes'] ) / (dif.total_seconds() + dif.microseconds / 1000000.0) if(self.simulation and self.simulationsteptime): + #print("calculate bandwidth: " + str(newTxBytes - flow['tx bytes']) + " Bytes in " + str(self.simulationsteptime)) tmpBandwidth = (newTxBytes - flow['tx bytes'] ) / (self.simulationsteptime) bandwidth[mac] = { 'bandwidth':(tmpBandwidth), @@ -277,7 +293,7 @@ def get_bandwidth(self): flow['last update'] = datetime.datetime.now() flow['old'] = False else : - device.my_control_flow.append({'mac address' : mac, 'tx bytes' : newTxBytes, 'last update' : datetime.datetime.now(), 'old' : False}) + device.my_control_flow[self.currentScenario].append({'mac address' : mac, 'tx bytes' : newTxBytes, 'last update' : datetime.datetime.now(), 'old' : False}) bandwidth[mac] = { 'mac' : mac, 'bandwidth': 0, @@ -285,9 +301,9 @@ def get_bandwidth(self): 'device': {'name': device.name, 'uuid': device.uuid}, 'interface': interface} - for flow in device.my_control_flow: + for flow in device.my_control_flow[self.currentScenario]: if flow['old']: - device.my_control_flow.remove(flow) + device.my_control_flow[self.currentScenario].remove(flow) return bandwidth def _get_raw_clientlist(self): @@ -315,8 +331,10 @@ def _get_raw_clientlist(self): for device in node.get_devices(): if type(device.my_control_flow) is not list: device.my_control_flow = [] + for i in range(self.scenarios): + device.my_control_flow.append([]) - for flow in device.my_control_flow: + for flow in device.my_control_flow[self.currentScenario]: flow['old'] = True for interface in device.get_interfaces(): @@ -439,7 +457,7 @@ def 
simulate_flows(self): mac = device.getHwAddr() flows.append({'mac address' : mac, 'channel number' : chnum, 'channel width' : chw, 'iface': interface}) - + print("simulate for " + str(self.currentScenario)) # simulate packet counter on AP modules for node in self.get_nodes(): for device in node.get_devices(): @@ -468,7 +486,8 @@ def periodic_evaluation(self, event): def reset(self): - self.registeredClients = self._create_client_list() + print("reset") + self.registeredClients = self._create_interface_list() self.observationSpace = self.get_observationSpace() self.actionSpace = self.get_actionSpace() self.actionSet = [] @@ -498,6 +517,11 @@ def reset(self): if(self.simulation): self.simulate_flows() self.get_bandwidth() + + # fill obsersavion buffer + for i in range(self.scenarios): + self.get_observation() + self.currentScenario = self.scenarios -1 return def execute_action(self, action): @@ -511,7 +535,7 @@ def execute_action(self, action): #ifaceaction = int(action / (pow(len(self.availableChannels),apindex))) #ifaceaction = ifaceaction % len(self.availableChannels) self.set_channel(interface['node'], interface['device'], interface['iface'], - self.availableChannels[ifaceaction], None) + self.availableChannels[int(ifaceaction)], None) return def render(): @@ -526,7 +550,7 @@ def get_observationSpace(self): the maximum is 10 ''' #maxValues = [len(self.availableChannels) for i in self._create_interface_list()] - return spaces.Box(low=0, high=5, shape=(len(self._create_interface_list()),2), dtype=numpy.uint32) + return spaces.Box(low=0, high=6, shape=(len(self._create_interface_list()),2), dtype=numpy.uint32) #return spaces.MultiDiscrete(maxValues) #spaces.Box(low=0, high=10000000, shape=(len(self.observationSpace),), dtype=numpy.float32) @@ -542,7 +566,7 @@ def get_actionSpace(self): for key, interface in enumerate(interfaceList): self.log.info(str(key) + ":" + interface['device']) if len(interfaceList) == 0: - return spaces.Discrete(0) + return 
spaces.MultiDiscrete([0]) maxValues = [len(self.availableChannels) for i in self._create_interface_list()] return spaces.MultiDiscrete(maxValues) #([ 5, 2, 2 ])(pow(len(self.availableChannels), len(interfaceList))) @@ -558,14 +582,17 @@ def get_observation(self): resultUniflexOrder.append([client_nums[i], neighbours_nums[i]]) #switch order of values in list result = [] + for i in range(0, len(resultUniflexOrder)): + if i >= len(self.observationOrder): + break result.append(resultUniflexOrder[self.observationOrder[i]]) if self.mode == "training": #store obsersavion for next time with this scenario self.observations[self.currentScenario] = result #load obsersavion for next scenario - self.currentScenario ++ + self.currentScenario += 1 if self.currentScenario >= self.scenarios: self.currentScenario = 0 result = self.observations[self.currentScenario] @@ -577,9 +604,9 @@ def get_gameOver(self): Test if topology changes Bases on information, which client is registered at which AP ''' - clients = self._create_client_list() - clientHash = [i['mac'] + i['node'] + i['device'] + i['iface'] for i in clients] - observationSpaceHash = [i['mac'] + i['node'] + i['device'] + i['iface'] for i in self.registeredClients] + clients = self._create_interface_list() + clientHash = [i['node'] + i['device'] + i['iface'] for i in clients] # i['mac'] + observationSpaceHash = [i['node'] + i['device'] + i['iface'] for i in self.registeredClients] #i['mac'] return not len(set(clientHash).symmetric_difference(set(observationSpaceHash))) == 0 def get_reward(self): diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index 99fd699..23bc31d 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -14,29 +14,47 @@ import os from math import * from scipy.optimize import fsolve +import pickle +import datetime -AVGTIME_ONEVALUE_RAND = 20 -RANDVALUE_FIRST_EPISODE = 0.55 +AVGTIME_ONEVALUE_RAND = 60 +RANDVALUE_FIRST_EPISODE = 0.7 REWARD_INIT = 0.00001 +SORT_VALUES = True + 
sortedIndecies = [] ac_space = [] +scenarios = 5 +currentScenario = 0 + +lastreward = np.zeros(scenarios) +minreward = np.zeros(scenarios) +maxreward = np.zeros(scenarios) +lastaction = np.zeros(scenarios) def normalize_state(state, ob_space, s_size): - state = np.reshape(state, [1, s_size]) - obspacehigh = np.reshape(ob_space.high, [1, s_size]) - state = state *2 / obspacehigh - 1 - + global sortedIndecies + state = np.array(state) #sort states index = np.arange(state.shape[0]) index = index.reshape((-1,1)) - state = np.insert(state, -1, index, axis=1) - state = np.sort(state.view('i8,i8,i8'), order=['f0', 'f1'], axis=0).view(np.int) + state = np.concatenate((state, index), axis=1) + #sort input and output if configured + if SORT_VALUES: + state = np.sort(state.view('i8,i8,i8'), order=['f0', 'f1'], axis=0).view(np.int) + #print("state" + str(state)) sortedIndecies = state[:,-1] + #print(sortedIndecies) state = np.delete(state, -1, axis=1) + + state = np.reshape(state, [1, s_size]) + # obspacehigh = np.reshape(ob_space.high, [1, s_size]) + state = state - 1 #*2 / obspacehigh - 1 + return state def guess_random_numbers_in_firstEpisode(a_size): - return AVGTIME_ONEVALUE_RAND * a_size + return AVGTIME_ONEVALUE_RAND * a_size * scenarios#**2 def guess_steps(a_size): return guess_random_numbers_in_firstEpisode(a_size) / RANDVALUE_FIRST_EPISODE @@ -46,14 +64,55 @@ def guess_epsilon_decay(steps, a_size): return fsolve(func, 0.9999999999)[0] def map_action(mappedAction): - action = np.zeros(len(mappedAction)) + action = np.zeros(len(ac_space.nvec)) for index in range(len(ac_space.nvec)): # filter action by the index ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) ifaceaction = ifaceaction % ac_space.nvec[0] - action[np.where(action == index)[0]] = ifaceaction + #print("ifaceaction at " + str(index) + " is " + str(ifaceaction)) + #print("Find " + str(index) + "in sorted indecies" + str(sortedIndecies)+ "at" + str(np.where(sortedIndecies == index))) + 
#action[np.where(sortedIndecies == index)[0]] = ifaceaction + action[sortedIndecies[index]] = ifaceaction return action +def reset_rewards(): + global maxreward + global minreward; + for i in range(scenarios): + maxreward[i] = REWARD_INIT + minreward[i] = np.inf + return + +def normalize_reward(reward, rewardpow, action): + global maxreward + global minreward; + global lastreward; + global currentScenario; + + orig = reward + + minreward[currentScenario] = min(reward, minreward[currentScenario]) + reward -= minreward[currentScenario] + + maxreward[currentScenario] = max(reward, maxreward[currentScenario]) + reward /= maxreward[currentScenario] + + print("reward:" + str(orig) + ", minreward:" + str(minreward[currentScenario]) + ", maxreward:" +str(maxreward[currentScenario]) + ", at scenario" + str(currentScenario)) + + #set reward to 1.0 if it is first value + if maxreward[currentScenario] == REWARD_INIT: + reward = 1.0 + + reward = pow(reward, rewardpow) + + #hysteresis + if action != lastaction[currentScenario] and abs(reward - lastreward[currentScenario]) < 0.1: + reward *= 0.9 + lastaction[currentScenario] = action + lastreward[currentScenario] = reward + + return reward + parser = argparse.ArgumentParser(description='Uniflex reader') parser.add_argument('--config', help='path to the uniflex config file', default=None) parser.add_argument('--output', help='path to a csv file for agent output data', default=None) @@ -63,6 +122,7 @@ def map_action(mappedAction): parser.add_argument('--episodes', help='number of episodes in this execution. 
If not set, the agents runs infinitly long', default=None) parser.add_argument('--startepisode', help='The episode we start with', default=1) parser.add_argument('--trainingfile', help='file to load and store training data', default=None) +parser.add_argument('--cpus', help='Numbers of cpus for this process', default=1) args = parser.parse_args() if not args.config: @@ -96,6 +156,7 @@ def map_action(mappedAction): while True: state = env.reset() + currentScenario = 0 n = 0 ac_space = env.action_space @@ -103,25 +164,35 @@ def map_action(mappedAction): print("reset agent") print("Observation space: ", ob_space, ob_space.dtype) - print("Action space: ", ac_space, ac_space.n) + print("Action space: ", ac_space, ac_space.nvec) tmps_size = ob_space.shape s_size = tmps_size[0] * tmps_size[1] #s_size = list(map(lambda x: x * ob_space.high, s_size)) a_size = pow(ac_space.nvec[0], ac_space.nvec.shape[0]) + if a_size == 0: + print("there is no vaild AP - sleep 2 seconds") + time.sleep(2) + continue + print("observation_space size:" + str(s_size)) state = normalize_state(state, ob_space, s_size) model = keras.Sequential() - model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) + model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) model.add(keras.layers.Dense(5, activation='relu')) model.add(keras.layers.Dense(a_size, activation='softmax')) model.compile(optimizer=tf.train.AdamOptimizer(0.001), loss='categorical_crossentropy', metrics=['accuracy']) + config = tf.ConfigProto() + config.intra_op_parallelism_threads = int(args.cpus) + config.inter_op_parallelism_threads = int(args.cpus) + tf.Session(config=config) + if args.trainingfile and not os.path.isfile(args.trainingfile): try: model.load_weights(args.trainingfile) @@ -130,6 +201,15 @@ def map_action(mappedAction): print("Spaces does not match") except tf.errors.NotFoundError: print("File not found. 
Skip loading") + + try: + with open(args.trainingfile + '.var', 'rb') as f: # Python 3: open(..., 'wb') + lastreward, minreward, maxreward, lastaction = pickle.load(f) + print("Load variables of last run") + except ValueError: + print("File format is wrong") + except FileNotFoundError: + print("File not found. Skip loading") print("State (Observation) of System" + str(state)) try: @@ -138,11 +218,6 @@ def map_action(mappedAction): continue rewardsum = 0 - if a_size == 0: - print("there is no vaild AP - sleep 2 seconds") - time.sleep(2) - continue - steps = guess_steps(a_size) epsilon_decay = guess_epsilon_decay(steps, a_size) print("Initialize agent. Exploration rate is " + str(epsilon_decay) @@ -151,8 +226,7 @@ def map_action(mappedAction): rewardpow = int(log(a_size, 2)) episode = 1 - maxreward = REWARD_INIT - minreward = np.inf + reset_rewards() while episode < int(args.startepisode): epsilon_max *= 0.999 @@ -172,8 +246,6 @@ def map_action(mappedAction): epsilon_max *= 0.999 epsilon_max = max(pow(epsilon_max, 3), epsilon_min) done = False - lastreward = 0 - lastaction = 0 aps = int(log(a_size, numChannels)) @@ -181,46 +253,40 @@ def map_action(mappedAction): # actions.append([]) state = env.reset() + state_orig = state + currentScenario = 0 state = normalize_state(state, ob_space, s_size) while not done: # Choose action + ts = time.time() + print("\nnew step at " + datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S,%f')) + print ("Run: " + str(run) + ", Episode: " + str(episode)) + print("Observation:" + str(state_orig)) + if np.random.rand(1) < epsilon: action = np.random.randint(a_size) else: action = np.argmax(model.predict(state)[0]) - action = map_action(action) + actionvector = map_action(action) - # Step - next_state, reward, done, _ = env.step(action) + print("Action:" +str(action) + ", Actionvector" + str(actionvector)) - minreward = min(reward, minreward) - reward -= minreward - - maxreward = max(reward, maxreward) - reward /= maxreward - 
- #set reward to 1.0 if it is first value - if maxreward == REWARD_INIT: - reward = 1.0 - - reward = pow(reward, rewardpow) + # Step + next_state, reward, done, _ = env.step(actionvector) - #hysteresis - if action != lastaction and abs(reward - lastreward) < 0.1: - reward *= 0.9 - lastaction = action - lastreward = reward + reward = normalize_reward(reward, rewardpow, action) if done: # print("episode: {}/{}, time: {}, rew: {}, eps: {:.2}" # .format(e, total_episodes, time, rewardsum, epsilon)) - maxreward = REWARD_INIT - minreward = np.inf + reset_rewards() + print("setting changes") break - + + state_orig = next_state next_state = normalize_state(next_state, ob_space, s_size) @@ -237,7 +303,6 @@ def map_action(mappedAction): print("agent new learning" + str(target_f)) model.fit(state, target_f, epochs=1, verbose=0) - state = next_state #rewardsum += reward if epsilon > epsilon_min: epsilon *= epsilon_decay @@ -246,7 +311,7 @@ def map_action(mappedAction): if args.output: with open(args.output, 'a') as csvFile: writer = csv.writer(csvFile) - writer.writerow([reward, action, episode]) + writer.writerow([reward, action, episode,currentScenario]) csvFile.close() #for ap in range(0, aps): @@ -256,10 +321,10 @@ def map_action(mappedAction): print ("Reward: " + str(reward)) print ("GameOver: " + str(done)) - print ("State: " + str(state)) - print ("Channel selection:" + str(action)) - print ("Run: " + str(run) + ", Episode: " + str(episode)) - print ("next step") + #print ("State: " + str(state)) + #print ("Channel selection:" + str(action)) + + state = next_state if args.plot: plt.subplot(211) @@ -273,12 +338,18 @@ def map_action(mappedAction): plt.xlabel('step') plt.pause(0.05) + currentScenario += 1 + if currentScenario >= scenarios: + currentScenario = 0 + run += 1 # next episode if enough steps, if enough episodes -> exit if steps <= run: if args.trainingfile: model.save_weights(args.trainingfile) + with open(args.trainingfile + '.var', 'wb') as f: # Python 3: 
open(..., 'wb') + pickle.dump([lastreward, minreward, maxreward, lastaction], f) if args.episodes and int(args.episodes) <= episode: os._exit(1) else: diff --git a/openAI_RRM/rrm_agent_evalmodel1_cliap-apcli.py b/openAI_RRM/rrm_agent_evalmodel1_cliap-apcli.py new file mode 100644 index 0000000..4402740 --- /dev/null +++ b/openAI_RRM/rrm_agent_evalmodel1_cliap-apcli.py @@ -0,0 +1,264 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import gym +import UniFlexGym +import tensorflow as tf +import tensorflow.contrib.slim as slim +import numpy as np +from tensorflow import keras +import argparse +import logging +import time +import csv +import os +from math import * +from scipy.optimize import fsolve +from gym import spaces + +sortedIndecies = [] +ac_space = [] +BANDWITH_ON_CHANNEL = 54e6 +numChannels = 2 + +def normalize_state(state, ob_space, s_size): + global sortedIndecies + state = np.array(state) + + #sort states + index = np.arange(state.shape[0]) + index = index.reshape((-1,1)) + state = np.concatenate((state, index), axis=1) + # + state = np.sort(state.view('i8,i8,i8'), order=['f0', 'f1'], axis=0).view(np.int) + sortedIndecies = state[:,-1] + state = np.delete(state, -1, axis=1) + state = np.reshape(state, [1, s_size]) + obspacehigh = np.reshape(ob_space.high, [1, s_size]) + #state = state *2 / obspacehigh - 1 + state = state -1 + + return state + +def map_action(mappedAction): + action = np.zeros(len(ac_space.nvec)) + for index in range(len(ac_space.nvec)): + # filter action by the index + ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) + ifaceaction = ifaceaction % ac_space.nvec[0] + #print("ifaceaction at " + str(index) + " is " + str(ifaceaction)) + #print("Find " + str(index) + "in sorted indecies" + str(sortedIndecies)+ "at" + str(np.where(sortedIndecies == index))) + #action[np.where(sortedIndecies == index)[0]] = ifaceaction + action[sortedIndecies[index]] = ifaceaction + return action + +def eval(clients, aps): + errorcounter_cli 
= 0 + errorcounter_ap = 0 + counter = 0 + + for client in clients: + for ap in aps: + state_cli = np.array([client['clients'], ap]) + state_ap = np.array([ap, client['clients']]) + + state_cli = state_cli.transpose() + state_ap = state_ap.transpose() + + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + + state_ap_norm = normalize_state(state_ap.tolist(), ob_space, s_size) + actionap = np.argmax(modelap.predict(state_ap_norm)[0]) + actionvectorap = map_action(actionap) + + #success_cli = actionvector in client['valid'] + #success_ap = actionvectorap in client['valid'] + + success_cli = False + for tmp in client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break + + success_ap = False + for tmp in client['valid']: + tmpval = True + for a, b in zip(actionvectorap, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_ap = True + break + + print("[Cli, Ap]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli)) + print("[Ap, Cli]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(actionap) + ", Actionvector" + str(actionvectorap) + ", " + str(success_ap)) + counter += 1 + + if not success_ap: + errorcounter_ap +=1 + + if not success_cli: + errorcounter_cli +=1 + + print("Errors in [Cli,Ap]:" + str(errorcounter_cli) + "/" + str(counter) + "(" + str(errorcounter_cli/counter) + "%)") + print("Errors in [Ap,Cli]:" + str(errorcounter_ap) + "/" + str(counter) + "(" + str(errorcounter_ap/counter) + "%)") + +def calculate_reward(clients_p_ap, action): + reward = 0 + + for ap in range(len(action)): + channel = action[ap] + + #search num aps on same channel + same_chan = 0 + for act in action: + if act == channel: + same_chan += 1 + + ap_bandwidth = 
BANDWITH_ON_CHANNEL/ same_chan + reward += clients_p_ap[ap] * sqrt(ap_bandwidth/clients_p_ap[ap]) + return reward + +def eval_handover(aps, client, new_clients): + print("Current state:") + for ap in aps: + state_cli = np.array([client['clients'], ap]) + + state_cli = state_cli.transpose() + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + + success_cli = False + for tmp in client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break + + reward = calculate_reward(client['clients'], ap) + print("[Cli, Ap]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) + + print("Handover simulation") + for new_client in new_clients: + for ap in aps: + state_cli = np.array([new_client['clients'], ap]) + + state_cli = state_cli.transpose() + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + reward = calculate_reward(new_client['clients'], actionvector) + + success_cli = False + for tmp in new_client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break + + print("[Cli, Ap]: Cli:" + str(new_client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) + +ac_space = spaces.MultiDiscrete([2,2,2]) +ob_space = spaces.Box(low=0, high=6, shape=(ac_space.nvec.shape[0],2), dtype=np.uint32) +trainingfileap = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test_ap-cli/logs/controller_3_112neuronalesNetz.train" 
+trainingfile = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/controller_3_112neuronalesNetz.train" + +clients = [{'clients': [1, 1, 2], 'valid':[[1,1,0], [0,0,1]]}, + {'clients': [1, 2, 3], 'valid':[[1,1,0], [0,0,1]]}, + {'clients': [6, 0, 0], 'valid':[[1,0,0], [0,1,1]]}, + {'clients': [1, 5, 1], 'valid':[[0,1,0], [1,0,1]]}, + {'clients': [2, 2, 2], 'valid':[[0,1,1], [1,0,1], [1,1,0], [0,1,0], [0,0,1], [1,0,0]]}, + {'clients': [5, 5, 1], 'valid':[[0,1,1], [1,0,1], [0,1,0], [1,0,0]]} + ] +clients2 = [{'clients': [1, 1, 2], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [1, 2, 3], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [6, 0, 0], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [1, 5, 1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [2, 2, 2], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [5, 5, 1], 'valid':[[1,0,1], [0,1,0]]} + ] +handover = [{'clients': [1, 5, 1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [2, 4, 1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [1, 4, 2], 'valid':[[1,0,1], [0,1,0]]} + ] + +handover2 = [{'clients': [1, 2, 1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [2, 1, 1], 'valid':[[0,1,1], [1,0,0]]}, + {'clients': [1, 1, 2], 'valid':[[1,1,0], [0,0,1]]} + ] + +handover3 = [{'clients': [2, 2, 1], 'valid':[[1,0,1], [0,1,0], [1,0,0], [0,1,1]]}, + {'clients': [2, 1, 2], 'valid':[[0,1,1], [1,0,0], [1,1,0], [0,0,1]]}, + {'clients': [1, 2, 2], 'valid':[[1,1,0], [0,0,1], [1,0,1], [0,1,0]]} + ] + +aps = [[2,2,2], [1,1,0], [1,0,1], [0,1,1], [0,0,0]] +aps2 = [[1,2,1], [0,0,1]] + +#states = [[[1,2],[1,2],[2,2]], [[1,2],[2,2],[3,2]], [[6,2],[0,2],[0,2]], [[1,2],[5,2],[1,2]], [[2,2],[2,2],[2,2]], +# [[1,1],[1,0],[2,1]], [[1,1],[2,1],[3,0]], [[6,1],[0,0],[0,1]], [[1,1],[5,1],[1,0]], [[2,0],[2,1],[2,1]], +# [[2,1],[2,2],[2,1]], [[3,1],[2,2],[2,1]]] +#states = [[[2,1],[2,1],[2,2]], [[2,1],[2,2],[2,3]], [[2,6],[2,0],[2,0]], [[2,1],[2,5],[2,1]], [[2,2],[2,2],[2,2]], +# [[0,1],[1,1],[1,2]], [[1,1],[0,2],[1,3]], 
[[1,6],[1,0],[0,0]], [[1,1],[1,5],[0,1]], [[1,2],[0,2],[1,2]], +# [[1,2],[2,2],[1,2]], [[1,3],[2,2],[1,2]]] + +print("Observation space: ", ob_space, ob_space.dtype) +print("Action space: ", ac_space, ac_space.nvec) + +tmps_size = ob_space.shape +s_size = tmps_size[0] * tmps_size[1] +#s_size = list(map(lambda x: x * ob_space.high, s_size)) +a_size = pow(ac_space.nvec[0], ac_space.nvec.shape[0]) + +print("observation_space size:" + str(s_size)) +print("Data: Trained Data of setting 222 with sorting agent. Observation has actual neighbours. Experiment 1") + +model = keras.Sequential() +model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) +model.add(keras.layers.Dense(5, activation='relu')) +model.add(keras.layers.Dense(a_size, activation='softmax')) +model.compile(optimizer=tf.train.AdamOptimizer(0.001), + loss='categorical_crossentropy', + metrics=['accuracy']) +model.load_weights(trainingfile) + +modelap = keras.Sequential() +modelap.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) +modelap.add(keras.layers.Dense(5, activation='relu')) +modelap.add(keras.layers.Dense(a_size, activation='softmax')) +modelap.compile(optimizer=tf.train.AdamOptimizer(0.001), + loss='categorical_crossentropy', + metrics=['accuracy']) +modelap.load_weights(trainingfileap) + +print("\nSame domain:") +eval(clients, aps) + +print("\nMan in the middle:") +eval(clients2, aps2) + +print("\nHandover test") +eval_handover(aps, handover[0], handover[1:]) + +print("\nHandover test 2") +eval_handover(aps, handover2[0], handover2[1:]) + +print("\nHandover test 3") +eval_handover(aps, handover3[0], handover3[1:]) diff --git a/openAI_RRM/rrm_agent_evalmodel2_sameset112.py b/openAI_RRM/rrm_agent_evalmodel2_sameset112.py new file mode 100644 index 0000000..f066ef3 --- /dev/null +++ b/openAI_RRM/rrm_agent_evalmodel2_sameset112.py @@ -0,0 +1,279 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import gym +import UniFlexGym +import tensorflow 
as tf +import tensorflow.contrib.slim as slim +import numpy as np +from tensorflow import keras +import argparse +import logging +import time +import csv +import os +from math import * +from scipy.optimize import fsolve +from gym import spaces + +sortedIndecies = [] +ac_space = [] +BANDWITH_ON_CHANNEL = 54e6 +numChannels = 2 +SORT_VALUES = True + +def normalize_state(state, ob_space, s_size): + global sortedIndecies + state = np.array(state) + + #sort states + index = np.arange(state.shape[0]) + index = index.reshape((-1,1)) + state = np.concatenate((state, index), axis=1) + # + state = np.sort(state.view('i8,i8,i8'), order=['f0', 'f1'], axis=0).view(np.int) + sortedIndecies = state[:,-1] + state = np.delete(state, -1, axis=1) + state = np.reshape(state, [1, s_size]) + obspacehigh = np.reshape(ob_space.high, [1, s_size]) + #state = state *2 / obspacehigh - 1 + state = state -1 + + return state + +def map_action(mappedAction): + action = np.zeros(len(ac_space.nvec)) + for index in range(len(ac_space.nvec)): + # filter action by the index + ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) + ifaceaction = ifaceaction % ac_space.nvec[0] + #print("ifaceaction at " + str(index) + " is " + str(ifaceaction)) + #print("Find " + str(index) + "in sorted indecies" + str(sortedIndecies)+ "at" + str(np.where(sortedIndecies == index))) + #action[np.where(sortedIndecies == index)[0]] = ifaceaction + action[sortedIndecies[index]] = ifaceaction + return action + +def eval(clients, aps): + errorcounter_cli = 0 + errorcounter_ap = 0 + counter = 0 + + for client in clients: + for ap in aps: + state_cli = np.array([client['clients'], ap]) + #state_ap = np.array([ap, client['clients']]) + + state_cli = state_cli.transpose() + #state_ap = state_ap.transpose() + + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + + #state_ap_norm = 
normalize_state(state_ap.tolist(), ob_space, s_size) + #actionap = np.argmax(modelap.predict(state_ap_norm)[0]) + #actionvectorap = map_action(actionap) + + success_cli = False + for tmp in client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break + + #success_ap = False + #for tmp in client['valid']: + # tmpval = True + # for a, b in zip(actionvectorap, tmp): + # if a != b: + # tmpval = False + # break + # if tmpval: + # success_ap = True + # break + + print("[Cli, Ap]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli)) + #print("[Ap, Cli]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(actionap) + ", Actionvector" + str(actionvectorap) + ", " + str(success_ap)) + counter += 1 + + #if not success_ap: + # errorcounter_ap +=1 + + if not success_cli: + errorcounter_cli +=1 + + print("Errors in [Cli,Ap]:" + str(errorcounter_cli) + "/" + str(counter) + "(" + str(errorcounter_cli/counter) + "%)") + #print("Errors in [Ap,Cli]:" + str(errorcounter_ap) + "/" + str(counter) + "(" + str(errorcounter_ap/counter) + "%)") + +def calculate_reward(clients_p_ap, action): + reward = 0 + + for ap in range(len(action)): + channel = action[ap] + + #search num aps on same channel + same_chan = 0 + for act in action: + if act == channel: + same_chan += 1 + + ap_bandwidth = BANDWITH_ON_CHANNEL/ same_chan + reward += clients_p_ap[ap] * sqrt(ap_bandwidth/clients_p_ap[ap]) + return reward + +def get_best_reward(client, ap): + state_cli = np.array([client, ap]) + #state_ap = np.array([ap, client['clients']]) + + state_cli = state_cli.transpose() + #state_ap = state_ap.transpose() + + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + + reward = calculate_reward(client, 
actionvector) + return reward + +def eval_handover(aps, client, new_clients): + print("Current state:") + for ap in aps: + state_cli = np.array([client['clients'], ap]) + + state_cli = state_cli.transpose() + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + + success_cli = False + for tmp in client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break + + reward = get_best_reward(client['clients'], ap) + + print("[Cli, Ap]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) + + print("Handover simulation") + for new_client in new_clients: + for ap in aps: + state_cli = np.array([new_client['clients'], ap]) + + state_cli = state_cli.transpose() + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + reward = calculate_reward(new_client['clients'], actionvector) + + success_cli = False + for tmp in new_client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break + + print("[Cli, Ap]: Cli:" + str(new_client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) + +ac_space = spaces.MultiDiscrete([2,2,2]) +ob_space = spaces.Box(low=0, high=6, shape=(ac_space.nvec.shape[0],2), dtype=np.uint32) +#trainingfileap = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/controller_3_112neuronalesNetz.train" +trainingfile = 
"/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/training3_sort_cli-stap_1set_112/logs/controller_3_112neuronalesNetz.train" + +clients = [{'clients': [1, 1, 2], 'valid':[[1,1,0], [0,0,1]]}, + {'clients': [1, 2, 3], 'valid':[[1,1,0], [0,0,1]]}, + {'clients': [6, 0, 0], 'valid':[[1,0,0], [0,1,1]]}, + {'clients': [1, 5, 1], 'valid':[[0,1,0], [1,0,1]]}, + {'clients': [2, 2, 2], 'valid':[[0,1,1], [1,0,1], [1,1,0], [0,1,0], [0,0,1], [1,0,0]]}, + {'clients': [5, 5, 1], 'valid':[[0,1,1], [1,0,1], [0,1,0], [1,0,0]]} + ] +clients2 = [{'clients': [1, 1, 2], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [1, 2, 3], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [6, 0, 0], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [1, 5, 1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [5, 1, 1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [2, 2, 2], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [5, 5, 1], 'valid':[[1,0,1], [0,1,0]]} + ] +handover = [{'clients': [1, 5, 1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [2, 4, 1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [1, 4, 2], 'valid':[[1,0,1], [0,1,0]]} + ] + +handover2 = [{'clients': [1, 2, 1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [2, 1, 1], 'valid':[[0,1,1], [1,0,0]]}, + {'clients': [1, 1, 2], 'valid':[[1,1,0], [0,0,1]]} + ] + +handover3 = [{'clients': [2, 2, 1], 'valid':[[1,0,1], [0,1,0], [1,0,0], [0,1,1]]}, + {'clients': [2, 1, 2], 'valid':[[0,1,1], [1,0,0], [1,1,0], [0,0,1]]}, + {'clients': [1, 2, 2], 'valid':[[1,1,0], [0,0,1], [1,0,1], [0,1,0]]} + ] + +aps = [[2,2,2]] +aps2 = [[1,2,1]] + +#states = [[[1,2],[1,2],[2,2]], [[1,2],[2,2],[3,2]], [[6,2],[0,2],[0,2]], [[1,2],[5,2],[1,2]], [[2,2],[2,2],[2,2]], +# [[1,1],[1,0],[2,1]], [[1,1],[2,1],[3,0]], [[6,1],[0,0],[0,1]], [[1,1],[5,1],[1,0]], [[2,0],[2,1],[2,1]], +# [[2,1],[2,2],[2,1]], [[3,1],[2,2],[2,1]]] +#states = [[[2,1],[2,1],[2,2]], [[2,1],[2,2],[2,3]], [[2,6],[2,0],[2,0]], [[2,1],[2,5],[2,1]], [[2,2],[2,2],[2,2]], +# [[0,1],[1,1],[1,2]], 
[[1,1],[0,2],[1,3]], [[1,6],[1,0],[0,0]], [[1,1],[1,5],[0,1]], [[1,2],[0,2],[1,2]], +# [[1,2],[2,2],[1,2]], [[1,3],[2,2],[1,2]]] + +print("Observation space: ", ob_space, ob_space.dtype) +print("Action space: ", ac_space, ac_space.nvec) + +tmps_size = ob_space.shape +s_size = tmps_size[0] * tmps_size[1] +#s_size = list(map(lambda x: x * ob_space.high, s_size)) +a_size = pow(ac_space.nvec[0], ac_space.nvec.shape[0]) + +print("observation_space size:" + str(s_size)) + +print("Data: Trained Data of Setting 112. Sorted by cli and aps Experiment 2") + +model = keras.Sequential() +model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) +model.add(keras.layers.Dense(5, activation='relu')) +model.add(keras.layers.Dense(a_size, activation='softmax')) +model.compile(optimizer=tf.train.AdamOptimizer(0.001), + loss='categorical_crossentropy', + metrics=['accuracy']) +model.load_weights(trainingfile) + +#modelap = keras.Sequential() +#modelap.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) +#modelap.add(keras.layers.Dense(5, activation='relu')) +#modelap.add(keras.layers.Dense(a_size, activation='softmax')) +#modelap.compile(optimizer=tf.train.AdamOptimizer(0.001), +# loss='categorical_crossentropy', +# metrics=['accuracy']) +#modelap.load_weights(trainingfileap) + +print("\nSame domain:") +eval(clients, aps) + +print("\nMan in the middle:") +eval(clients2, aps2) + +print("\nHandover test") +eval_handover(aps, handover[0], handover[1:]) + +print("\nHandover test 2") +eval_handover(aps, handover2[0], handover2[1:]) + +print("\nHandover test 3") +eval_handover(aps, handover3[0], handover3[1:]) diff --git a/openAI_RRM/rrm_agent_evalmodel3_difset_sort_cli-ap.py b/openAI_RRM/rrm_agent_evalmodel3_difset_sort_cli-ap.py new file mode 100644 index 0000000..c97b091 --- /dev/null +++ b/openAI_RRM/rrm_agent_evalmodel3_difset_sort_cli-ap.py @@ -0,0 +1,284 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import gym +import 
UniFlexGym +import tensorflow as tf +import tensorflow.contrib.slim as slim +import numpy as np +from tensorflow import keras +import argparse +import logging +import time +import csv +import os +from math import * +from scipy.optimize import fsolve +from gym import spaces + +sortedIndecies = [] +ac_space = [] +BANDWITH_ON_CHANNEL = 54e6 +numChannels = 2 + +def normalize_state(state, ob_space, s_size): + global sortedIndecies + state = np.array(state) + + #sort states + index = np.arange(state.shape[0]) + index = index.reshape((-1,1)) + state = np.concatenate((state, index), axis=1) + # + state = np.sort(state.view('i8,i8,i8'), order=['f0', 'f1'], axis=0).view(np.int) + sortedIndecies = state[:,-1] + state = np.delete(state, -1, axis=1) + state = np.reshape(state, [1, s_size]) + obspacehigh = np.reshape(ob_space.high, [1, s_size]) + #state = state *2 / obspacehigh - 1 + state = state -1 + + return state + +def map_action(mappedAction): + action = np.zeros(len(ac_space.nvec)) + for index in range(len(ac_space.nvec)): + # filter action by the index + ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) + ifaceaction = ifaceaction % ac_space.nvec[0] + #print("ifaceaction at " + str(index) + " is " + str(ifaceaction)) + #print("Find " + str(index) + "in sorted indecies" + str(sortedIndecies)+ "at" + str(np.where(sortedIndecies == index))) + #action[np.where(sortedIndecies == index)[0]] = ifaceaction + action[sortedIndecies[index]] = ifaceaction + return action + +def eval(clients): + errorcounter_cli = 0 + errorcounter_ap = 0 + counter = 0 + + for client in clients: + ap = client['aps'] + state_cli = np.array([client['clients'], ap]) + #state_ap = np.array([ap, client['clients']]) + + state_cli = state_cli.transpose() + #state_ap = state_ap.transpose() + + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + + #state_ap_norm = 
normalize_state(state_ap.tolist(), ob_space, s_size) + #actionap = np.argmax(modelap.predict(state_ap_norm)[0]) + #actionvectorap = map_action(actionap) + + success_cli = False + for tmp in client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break + + #success_ap = False + #for tmp in client['valid']: + # tmpval = True + # for a, b in zip(actionvectorap, tmp): + # if a != b: + # tmpval = False + # break + # if tmpval: + # success_ap = True + # break + + print("[Cli, Ap]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli)) + #print("[Ap, Cli]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(actionap) + ", Actionvector" + str(actionvectorap) + ", " + str(success_ap)) + counter += 1 + + #if not success_ap: + # errorcounter_ap +=1 + + if not success_cli: + errorcounter_cli +=1 + + print("Errors in [Cli,Ap]:" + str(errorcounter_cli) + "/" + str(counter) + "(" + str(errorcounter_cli/counter) + "%)") + #print("Errors in [Ap,Cli]:" + str(errorcounter_ap) + "/" + str(counter) + "(" + str(errorcounter_ap/counter) + "%)") + +def calculate_reward(clients_p_ap, action): + reward = 0 + + for ap in range(len(action)): + channel = action[ap] + + #search num aps on same channel + same_chan = 0 + for act in action: + if act == channel: + same_chan += 1 + + ap_bandwidth = BANDWITH_ON_CHANNEL/ same_chan + reward += clients_p_ap[ap] * sqrt(ap_bandwidth/clients_p_ap[ap]) + return reward + +def get_best_reward(client, ap): + state_cli = np.array([client, ap]) + #state_ap = np.array([ap, client['clients']]) + + state_cli = state_cli.transpose() + #state_ap = state_ap.transpose() + + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + + reward = calculate_reward(client, 
actionvector) + return reward + +def eval_handover(client, new_clients): + print("Current state:") + ap = client['aps'] + state_cli = np.array([client['clients'], ap]) + + state_cli = state_cli.transpose() + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + + success_cli = False + for tmp in client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break + + reward = get_best_reward(client['clients'], ap) + + print("[Cli, Ap]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) + + print("Handover simulation") + for new_client in new_clients: + ap = new_client['aps'] + state_cli = np.array([new_client['clients'], ap]) + + state_cli = state_cli.transpose() + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + reward = calculate_reward(new_client['clients'], actionvector) + + success_cli = False + for tmp in new_client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break + + print("[Cli, Ap]: Cli:" + str(new_client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) + +ac_space = spaces.MultiDiscrete([2,2,2]) +ob_space = spaces.Box(low=0, high=6, shape=(ac_space.nvec.shape[0],2), dtype=np.uint32) +#trainingfileap = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/controller_3_112neuronalesNetz.train" +trainingfile = 
"/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/training3_sort_cli-stap_3set_190706_small/logs/controller_3_varSetsmallneuronalesNetz.train" + +clients = [ {'clients': [1, 1, 5], 'aps': [2,2,2], 'valid':[[1,1,0], [0,0,1]]}, + {'clients': [1, 3, 2], 'aps': [2,2,2], 'valid':[[0,1,0], [1,0,1]]}, + {'clients': [5, 3, 4], 'aps': [2,2,2], 'valid':[[1,0,0], [0,1,1]]}, + {'clients': [5, 1, 3], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, + {'clients': [2, 4, 2], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, + {'clients': [7, 1, 5], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, + {'clients': [4, 3, 2], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]}, + {'clients': [1, 3, 2], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]} + ] +#clients2 = [{'clients': [1, 1, 2], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [1, 2, 3], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [6, 0, 0], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [1, 5, 1], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [5, 1, 1], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [2, 2, 2], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [5, 5, 1], 'valid':[[1,0,1], [0,1,0]]} +# ] +handover = [{'clients': [1, 5, 1], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [2, 4, 1], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [1, 4, 2], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]} + ] + +handover2 = [{'clients': [1, 5, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [2, 4, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [1, 4, 2], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]} + ] + +#handover2 = [{'clients': [1, 2, 1], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [2, 1, 1], 'valid':[[0,1,1], [1,0,0]]}, +# {'clients': [1, 1, 2], 'valid':[[1,1,0], [0,0,1]]} +# ] + +handover3 = [{'clients': [2, 2, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0], [1,0,0], [0,1,1]]}, + {'clients': [2, 1, 2], 'aps': [1,2,1], 
'valid':[[0,1,1], [1,0,0], [1,1,0], [0,0,1]]}, + {'clients': [1, 2, 2], 'aps': [1,2,1], 'valid':[[1,1,0], [0,0,1], [1,0,1], [0,1,0]]} + ] + +#aps = [[2,2,2]] +#aps2 = [[1,2,1]] + +#states = [[[1,2],[1,2],[2,2]], [[1,2],[2,2],[3,2]], [[6,2],[0,2],[0,2]], [[1,2],[5,2],[1,2]], [[2,2],[2,2],[2,2]], +# [[1,1],[1,0],[2,1]], [[1,1],[2,1],[3,0]], [[6,1],[0,0],[0,1]], [[1,1],[5,1],[1,0]], [[2,0],[2,1],[2,1]], +# [[2,1],[2,2],[2,1]], [[3,1],[2,2],[2,1]]] +#states = [[[2,1],[2,1],[2,2]], [[2,1],[2,2],[2,3]], [[2,6],[2,0],[2,0]], [[2,1],[2,5],[2,1]], [[2,2],[2,2],[2,2]], +# [[0,1],[1,1],[1,2]], [[1,1],[0,2],[1,3]], [[1,6],[1,0],[0,0]], [[1,1],[1,5],[0,1]], [[1,2],[0,2],[1,2]], +# [[1,2],[2,2],[1,2]], [[1,3],[2,2],[1,2]]] + +print("Observation space: ", ob_space, ob_space.dtype) +print("Action space: ", ac_space, ac_space.nvec) + +tmps_size = ob_space.shape +s_size = tmps_size[0] * tmps_size[1] +#s_size = list(map(lambda x: x * ob_space.high, s_size)) +a_size = pow(ac_space.nvec[0], ac_space.nvec.shape[0]) + +print("observation_space size:" + str(s_size)) +print("Data: Trained Data of different settings with sorting agent. 
Experiment 3") + +model = keras.Sequential() +model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) +model.add(keras.layers.Dense(5, activation='relu')) +model.add(keras.layers.Dense(a_size, activation='softmax')) +model.compile(optimizer=tf.train.AdamOptimizer(0.001), + loss='categorical_crossentropy', + metrics=['accuracy']) +model.load_weights(trainingfile) + +#modelap = keras.Sequential() +#modelap.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) +#modelap.add(keras.layers.Dense(5, activation='relu')) +#modelap.add(keras.layers.Dense(a_size, activation='softmax')) +#modelap.compile(optimizer=tf.train.AdamOptimizer(0.001), +# loss='categorical_crossentropy', +# metrics=['accuracy']) +#modelap.load_weights(trainingfileap) + +print("\nSame domain:") +eval(clients) + +#print("\nMan in the middle:") +#eval(clients2, aps2) + +print("\nHandover test") +eval_handover(handover[0], handover[1:]) + +print("\nHandover test 2") +eval_handover(handover2[0], handover2[1:]) + +print("\nHandover test 3") +eval_handover(handover3[0], handover3[1:]) diff --git a/openAI_RRM/rrm_agent_evalmodel4_difset_unsorted.py b/openAI_RRM/rrm_agent_evalmodel4_difset_unsorted.py new file mode 100644 index 0000000..0446cde --- /dev/null +++ b/openAI_RRM/rrm_agent_evalmodel4_difset_unsorted.py @@ -0,0 +1,287 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import gym +import UniFlexGym +import tensorflow as tf +import tensorflow.contrib.slim as slim +import numpy as np +from tensorflow import keras +import argparse +import logging +import time +import csv +import os +from math import * +from scipy.optimize import fsolve +from gym import spaces + +sortedIndecies = [] +ac_space = [] +BANDWITH_ON_CHANNEL = 54e6 +numChannels = 2 +SORT_VALUES = False + +def normalize_state(state, ob_space, s_size): + global sortedIndecies + state = np.array(state) + + #sort states + index = np.arange(state.shape[0]) + index = index.reshape((-1,1)) + state 
= np.concatenate((state, index), axis=1) + # + if SORT_VALUES: + state = np.sort(state.view('i8,i8,i8'), order=['f0', 'f1'], axis=0).view(np.int) + sortedIndecies = state[:,-1] + state = np.delete(state, -1, axis=1) + state = np.reshape(state, [1, s_size]) + obspacehigh = np.reshape(ob_space.high, [1, s_size]) + #state = state *2 / obspacehigh - 1 + state = state -1 + + return state + +def map_action(mappedAction): + action = np.zeros(len(ac_space.nvec)) + for index in range(len(ac_space.nvec)): + # filter action by the index + ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) + ifaceaction = ifaceaction % ac_space.nvec[0] + #print("ifaceaction at " + str(index) + " is " + str(ifaceaction)) + #print("Find " + str(index) + "in sorted indecies" + str(sortedIndecies)+ "at" + str(np.where(sortedIndecies == index))) + #action[np.where(sortedIndecies == index)[0]] = ifaceaction + action[sortedIndecies[index]] = ifaceaction + return action + +def eval(clients): + errorcounter_cli = 0 + errorcounter_ap = 0 + counter = 0 + + for client in clients: + ap = client['aps'] + state_cli = np.array([client['clients'], ap]) + #state_ap = np.array([ap, client['clients']]) + + state_cli = state_cli.transpose() + #state_ap = state_ap.transpose() + + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + + #state_ap_norm = normalize_state(state_ap.tolist(), ob_space, s_size) + #actionap = np.argmax(modelap.predict(state_ap_norm)[0]) + #actionvectorap = map_action(actionap) + + success_cli = False + for tmp in client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break + + #success_ap = False + #for tmp in client['valid']: + # tmpval = True + # for a, b in zip(actionvectorap, tmp): + # if a != b: + # tmpval = False + # break + # if tmpval: + # success_ap = True + # break + + 
print("[Cli, Ap]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli)) + #print("[Ap, Cli]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(actionap) + ", Actionvector" + str(actionvectorap) + ", " + str(success_ap)) + counter += 1 + + #if not success_ap: + # errorcounter_ap +=1 + + if not success_cli: + errorcounter_cli +=1 + + print("Errors in [Cli,Ap]:" + str(errorcounter_cli) + "/" + str(counter) + "(" + str(errorcounter_cli/counter) + "%)") + #print("Errors in [Ap,Cli]:" + str(errorcounter_ap) + "/" + str(counter) + "(" + str(errorcounter_ap/counter) + "%)") + +def calculate_reward(clients_p_ap, action): + reward = 0 + + for ap in range(len(action)): + channel = action[ap] + + #search num aps on same channel + same_chan = 0 + for act in action: + if act == channel: + same_chan += 1 + + ap_bandwidth = BANDWITH_ON_CHANNEL/ same_chan + reward += clients_p_ap[ap] * sqrt(ap_bandwidth/clients_p_ap[ap]) + return reward + +def get_best_reward(client, ap): + state_cli = np.array([client, ap]) + #state_ap = np.array([ap, client['clients']]) + + state_cli = state_cli.transpose() + #state_ap = state_ap.transpose() + + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + + reward = calculate_reward(client, actionvector) + return reward + +def eval_handover(client, new_clients): + print("Current state:") + ap = client['aps'] + state_cli = np.array([client['clients'], ap]) + + state_cli = state_cli.transpose() + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + + success_cli = False + for tmp in client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break 
+ + reward = get_best_reward(client['clients'], ap) + + print("Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) + + print("Handover simulation") + for new_client in new_clients: + ap = new_client['aps'] + state_cli = np.array([new_client['clients'], ap]) + + state_cli = state_cli.transpose() + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + reward = calculate_reward(new_client['clients'], actionvector) + + success_cli = False + for tmp in new_client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break + + print("Cli:" + str(new_client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) + +ac_space = spaces.MultiDiscrete([2,2,2]) +ob_space = spaces.Box(low=0, high=6, shape=(ac_space.nvec.shape[0],2), dtype=np.uint32) +#trainingfileap = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/controller_3_112neuronalesNetz.train" +trainingfile = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/training4_unsort_3set_2/logs/controller_3_varSetneuronalesNetz.train" + +clients = [ {'clients': [1, 1, 5], 'aps': [2,2,2], 'valid':[[1,1,0], [0,0,1]]}, + {'clients': [1, 3, 2], 'aps': [2,2,2], 'valid':[[0,1,0], [1,0,1]]}, + {'clients': [5, 3, 4], 'aps': [2,2,2], 'valid':[[1,0,0], [0,1,1]]}, + {'clients': [5, 1, 3], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, + {'clients': [2, 4, 2], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, + {'clients': [7, 1, 5], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, + {'clients': [4, 3, 2], 'aps': [1,0,1], 
'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]}, + {'clients': [1, 3, 2], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]} + ] +#clients2 = [{'clients': [1, 1, 2], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [1, 2, 3], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [6, 0, 0], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [1, 5, 1], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [5, 1, 1], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [2, 2, 2], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [5, 5, 1], 'valid':[[1,0,1], [0,1,0]]} +# ] +handover = [{'clients': [1, 5, 1], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [2, 4, 1], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [1, 4, 2], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]} + ] + +handover2 = [{'clients': [1, 5, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [2, 4, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [1, 4, 2], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]} + ] + +#handover2 = [{'clients': [1, 2, 1], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [2, 1, 1], 'valid':[[0,1,1], [1,0,0]]}, +# {'clients': [1, 1, 2], 'valid':[[1,1,0], [0,0,1]]} +# ] + +handover3 = [{'clients': [2, 2, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0], [1,0,0], [0,1,1]]}, + {'clients': [2, 1, 2], 'aps': [1,2,1], 'valid':[[0,1,1], [1,0,0], [1,1,0], [0,0,1]]}, + {'clients': [1, 2, 2], 'aps': [1,2,1], 'valid':[[1,1,0], [0,0,1], [1,0,1], [0,1,0]]} + ] + +#aps = [[2,2,2]] +#aps2 = [[1,2,1]] + +#states = [[[1,2],[1,2],[2,2]], [[1,2],[2,2],[3,2]], [[6,2],[0,2],[0,2]], [[1,2],[5,2],[1,2]], [[2,2],[2,2],[2,2]], +# [[1,1],[1,0],[2,1]], [[1,1],[2,1],[3,0]], [[6,1],[0,0],[0,1]], [[1,1],[5,1],[1,0]], [[2,0],[2,1],[2,1]], +# [[2,1],[2,2],[2,1]], [[3,1],[2,2],[2,1]]] +#states = [[[2,1],[2,1],[2,2]], [[2,1],[2,2],[2,3]], [[2,6],[2,0],[2,0]], [[2,1],[2,5],[2,1]], [[2,2],[2,2],[2,2]], +# [[0,1],[1,1],[1,2]], [[1,1],[0,2],[1,3]], [[1,6],[1,0],[0,0]], [[1,1],[1,5],[0,1]], [[1,2],[0,2],[1,2]], +# 
[[1,2],[2,2],[1,2]], [[1,3],[2,2],[1,2]]] + +print("Observation space: ", ob_space, ob_space.dtype) +print("Action space: ", ac_space, ac_space.nvec) + +tmps_size = ob_space.shape +s_size = tmps_size[0] * tmps_size[1] +#s_size = list(map(lambda x: x * ob_space.high, s_size)) +a_size = pow(ac_space.nvec[0], ac_space.nvec.shape[0]) + +print("observation_space size:" + str(s_size)) + +print("Data: Trained Data of different settings with unsorted agent. Experiment 4") + +model = keras.Sequential() +model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) +model.add(keras.layers.Dense(5, activation='relu')) +model.add(keras.layers.Dense(a_size, activation='softmax')) +model.compile(optimizer=tf.train.AdamOptimizer(0.001), + loss='categorical_crossentropy', + metrics=['accuracy']) +model.load_weights(trainingfile) + +#modelap = keras.Sequential() +#modelap.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) +#modelap.add(keras.layers.Dense(5, activation='relu')) +#modelap.add(keras.layers.Dense(a_size, activation='softmax')) +#modelap.compile(optimizer=tf.train.AdamOptimizer(0.001), +# loss='categorical_crossentropy', +# metrics=['accuracy']) +#modelap.load_weights(trainingfileap) + +print("\nSame domain:") +eval(clients) + +#print("\nMan in the middle:") +#eval(clients2, aps2) + +print("\nHandover test") +eval_handover(handover[0], handover[1:]) + +print("\nHandover test 2") +eval_handover(handover2[0], handover2[1:]) + +print("\nHandover test 3") +eval_handover(handover3[0], handover3[1:]) diff --git a/openAI_RRM/rrm_agent_evalmodel5_difset_sort_cli-ap_detecttopo.py b/openAI_RRM/rrm_agent_evalmodel5_difset_sort_cli-ap_detecttopo.py new file mode 100644 index 0000000..3de1ea0 --- /dev/null +++ b/openAI_RRM/rrm_agent_evalmodel5_difset_sort_cli-ap_detecttopo.py @@ -0,0 +1,290 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import gym +import UniFlexGym +import tensorflow as tf +import tensorflow.contrib.slim as 
slim +import numpy as np +from tensorflow import keras +import argparse +import logging +import time +import csv +import os +from math import * +from scipy.optimize import fsolve +from gym import spaces + +sortedIndecies = [] +ac_space = [] +BANDWITH_ON_CHANNEL = 54e6 +numChannels = 2 +SORT_VALUES = True + +def normalize_state(state, ob_space, s_size): + global sortedIndecies + state = np.array(state) + + #sort states + index = np.arange(state.shape[0]) + index = index.reshape((-1,1)) + state = np.concatenate((state, index), axis=1) + # + if SORT_VALUES: + state = np.sort(state.view('i8,i8,i8'), order=['f0', 'f1'], axis=0).view(np.int) + sortedIndecies = state[:,-1] + state = np.delete(state, -1, axis=1) + state = np.reshape(state, [1, s_size]) + obspacehigh = np.reshape(ob_space.high, [1, s_size]) + #state = state *2 / obspacehigh - 1 + state = state -1 + + return state + +def map_action(mappedAction): + action = np.zeros(len(ac_space.nvec)) + for index in range(len(ac_space.nvec)): + # filter action by the index + ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) + ifaceaction = ifaceaction % ac_space.nvec[0] + #print("ifaceaction at " + str(index) + " is " + str(ifaceaction)) + #print("Find " + str(index) + "in sorted indecies" + str(sortedIndecies)+ "at" + str(np.where(sortedIndecies == index))) + #action[np.where(sortedIndecies == index)[0]] = ifaceaction + action[sortedIndecies[index]] = ifaceaction + return action + +def eval(clients): + errorcounter_cli = 0 + errorcounter_ap = 0 + counter = 0 + + for client in clients: + ap = client['aps'] + state_cli = np.array([client['clients'], ap]) + #state_ap = np.array([ap, client['clients']]) + + state_cli = state_cli.transpose() + #state_ap = state_ap.transpose() + + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + + #state_ap_norm = normalize_state(state_ap.tolist(), ob_space, s_size) + 
#actionap = np.argmax(modelap.predict(state_ap_norm)[0]) + #actionvectorap = map_action(actionap) + + success_cli = False + for tmp in client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break + + #success_ap = False + #for tmp in client['valid']: + # tmpval = True + # for a, b in zip(actionvectorap, tmp): + # if a != b: + # tmpval = False + # break + # if tmpval: + # success_ap = True + # break + + print("[Cli, Ap]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli)) + #print("[Ap, Cli]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(actionap) + ", Actionvector" + str(actionvectorap) + ", " + str(success_ap)) + counter += 1 + + #if not success_ap: + # errorcounter_ap +=1 + + if not success_cli: + errorcounter_cli +=1 + + print("Errors in [Cli,Ap]:" + str(errorcounter_cli) + "/" + str(counter) + "(" + str(errorcounter_cli/counter) + "%)") + #print("Errors in [Ap,Cli]:" + str(errorcounter_ap) + "/" + str(counter) + "(" + str(errorcounter_ap/counter) + "%)") + +def calculate_reward(clients_p_ap, action): + reward = 0 + + for ap in range(len(action)): + channel = action[ap] + + #search num aps on same channel + same_chan = 0 + for act in action: + if act == channel: + same_chan += 1 + + ap_bandwidth = BANDWITH_ON_CHANNEL/ same_chan + reward += clients_p_ap[ap] * sqrt(ap_bandwidth/clients_p_ap[ap]) + return reward + +def get_best_reward(client, ap): + state_cli = np.array([client, ap]) + #state_ap = np.array([ap, client['clients']]) + + state_cli = state_cli.transpose() + #state_ap = state_ap.transpose() + + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + + reward = calculate_reward(client, actionvector) + return reward + +def eval_handover(client, 
new_clients): + print("Current state:") + ap = client['aps'] + state_cli = np.array([client['clients'], ap]) + + state_cli = state_cli.transpose() + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + + success_cli = False + for tmp in client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break + + reward = get_best_reward(client['clients'], ap) + + print("Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) + + print("Handover simulation") + for new_client in new_clients: + ap = new_client['aps'] + state_cli = np.array([new_client['clients'], ap]) + + state_cli = state_cli.transpose() + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + reward = calculate_reward(new_client['clients'], actionvector) + + success_cli = False + for tmp in new_client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break + + print("Cli:" + str(new_client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) + + +ac_space = spaces.MultiDiscrete([2,2,2]) +ob_space = spaces.Box(low=0, high=6, shape=(ac_space.nvec.shape[0],2), dtype=np.uint32) +#trainingfileap = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/controller_3_112neuronalesNetz.train" +trainingfile = 
"/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/training5_1_sort_3set_detecttopo/logs/controller_3_varSetsmallneuronalesNetz.train" + +clients = [ {'clients': [1, 1, 2], 'aps': [2,2,2], 'valid':[[1,1,0], [0,0,1]]}, + {'clients': [1, 1, 1], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, + {'clients': [1, 1, 5], 'aps': [2,2,2], 'valid':[[1,1,0], [0,0,1]]}, + {'clients': [1, 3, 2], 'aps': [2,2,2], 'valid':[[0,1,0], [1,0,1]]}, + {'clients': [5, 3, 4], 'aps': [2,2,2], 'valid':[[1,0,0], [0,1,1]]}, + {'clients': [5, 1, 3], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, + {'clients': [2, 4, 2], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, + {'clients': [7, 1, 5], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, + {'clients': [4, 3, 2], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]}, + {'clients': [1, 1, 1], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]}, + {'clients': [1, 3, 2], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]} + ] +#clients2 = [{'clients': [1, 1, 2], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [1, 2, 3], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [6, 0, 0], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [1, 5, 1], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [5, 1, 1], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [2, 2, 2], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [5, 5, 1], 'valid':[[1,0,1], [0,1,0]]} +# ] +handover = [{'clients': [1, 5, 1], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [2, 4, 1], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [1, 4, 2], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]} + ] + +handover2 = [{'clients': [1, 5, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [2, 4, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [1, 4, 2], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]} + ] + +#handover2 = [{'clients': [1, 2, 1], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [2, 1, 1], 'valid':[[0,1,1], [1,0,0]]}, 
+# {'clients': [1, 1, 2], 'valid':[[1,1,0], [0,0,1]]} +# ] + +handover3 = [{'clients': [2, 2, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0], [1,0,0], [0,1,1]]}, + {'clients': [2, 1, 2], 'aps': [1,2,1], 'valid':[[0,1,1], [1,0,0], [1,1,0], [0,0,1]]}, + {'clients': [1, 2, 2], 'aps': [1,2,1], 'valid':[[1,1,0], [0,0,1], [1,0,1], [0,1,0]]} + ] + +#aps = [[2,2,2]] +#aps2 = [[1,2,1]] + +#states = [[[1,2],[1,2],[2,2]], [[1,2],[2,2],[3,2]], [[6,2],[0,2],[0,2]], [[1,2],[5,2],[1,2]], [[2,2],[2,2],[2,2]], +# [[1,1],[1,0],[2,1]], [[1,1],[2,1],[3,0]], [[6,1],[0,0],[0,1]], [[1,1],[5,1],[1,0]], [[2,0],[2,1],[2,1]], +# [[2,1],[2,2],[2,1]], [[3,1],[2,2],[2,1]]] +#states = [[[2,1],[2,1],[2,2]], [[2,1],[2,2],[2,3]], [[2,6],[2,0],[2,0]], [[2,1],[2,5],[2,1]], [[2,2],[2,2],[2,2]], +# [[0,1],[1,1],[1,2]], [[1,1],[0,2],[1,3]], [[1,6],[1,0],[0,0]], [[1,1],[1,5],[0,1]], [[1,2],[0,2],[1,2]], +# [[1,2],[2,2],[1,2]], [[1,3],[2,2],[1,2]]] + +print("Observation space: ", ob_space, ob_space.dtype) +print("Action space: ", ac_space, ac_space.nvec) + +tmps_size = ob_space.shape +s_size = tmps_size[0] * tmps_size[1] +#s_size = list(map(lambda x: x * ob_space.high, s_size)) +a_size = pow(ac_space.nvec[0], ac_space.nvec.shape[0]) + +print("observation_space size:" + str(s_size)) +print("Data: Trained Data of different settings with sorting agent. 
Experiment 3") + +model = keras.Sequential() +model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) +#model.add(keras.layers.Dense(5, activation='relu')) +model.add(keras.layers.Dense(a_size, activation='softmax')) +model.compile(optimizer=tf.train.AdamOptimizer(0.001), + loss='categorical_crossentropy', + metrics=['accuracy']) +model.load_weights(trainingfile) + +#modelap = keras.Sequential() +#modelap.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) +#modelap.add(keras.layers.Dense(5, activation='relu')) +#modelap.add(keras.layers.Dense(a_size, activation='softmax')) +#modelap.compile(optimizer=tf.train.AdamOptimizer(0.001), +# loss='categorical_crossentropy', +# metrics=['accuracy']) +#modelap.load_weights(trainingfileap) + +print("\nSame domain:") +eval(clients) + +#print("\nMan in the middle:") +#eval(clients2, aps2) + +print("\nHandover test") +eval_handover(handover[0], handover[1:]) + +print("\nHandover test 2") +eval_handover(handover2[0], handover2[1:]) + +print("\nHandover test 3") +eval_handover(handover3[0], handover3[1:]) From b204fbee2a49329a086cf6b643702563a09d8019 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Thu, 25 Jul 2019 10:10:03 +0200 Subject: [PATCH 46/54] run multiple scenarios --- .../Setting3_varSet/config_slave.yaml | 6 +- .../Setting3_varSet/config_slave2.yaml | 6 +- .../Setting3_varSet/config_slave3.yaml | 6 +- openAI_RRM/channel_controller.py | 49 +- openAI_RRM/config_master_simulation.yaml | 2 +- ...valmodel5_difset_sort_cli-ap_detecttopo.py | 4 +- openAI_RRM/rrm_agent_evalmodel6_multi.py | 375 ++++++++++++++ openAI_RRM/rrm_agent_multi.py | 467 ++++++++++++++++++ 8 files changed, 884 insertions(+), 31 deletions(-) create mode 100644 openAI_RRM/rrm_agent_evalmodel6_multi.py create mode 100644 openAI_RRM/rrm_agent_multi.py diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave.yaml 
b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave.yaml index 3f02d06..5635feb 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave.yaml @@ -29,13 +29,15 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02", "cc:cc:cc:cc:cc:03", "cc:cc:cc:cc:cc:04", "cc:cc:cc:cc:cc:05", "cc:cc:cc:cc:cc:06"], - 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:03']], + 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:03']], 'myMAC' : 'aa:aa:aa:aa:aa:01', 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP1', 'mode': 'generator', # training or working or generator 'numsClients': [1,1,5,2,4], #for generator 'maxNumClients' : 100, - 'scenariosPerAPSetting': 40 + 'scenariosPerAPSetting': 60, + 'clientPrefix' : "cc:cc:cc:cc:01:", + 'scenarioBackup' : '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/AP1_scenarios.var' }} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave2.yaml index 3aeb32e..4d21eb9 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave2.yaml @@ -29,13 +29,15 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12", "cc:cc:cc:cc:cc:13", "cc:cc:cc:cc:cc:14", "cc:cc:cc:cc:cc:15", "cc:cc:cc:cc:cc:16"], - 'neighbors' : [['aa:aa:aa:aa:aa:01', 
'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], []], + 'neighbors' : [['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], []], 'myMAC' : 'aa:aa:aa:aa:aa:02', 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP2', 'mode': 'generator', # training or working or generator 'numsClients': [1,3,1,4,3], #for generator 'maxNumClients' : 100, - 'scenariosPerAPSetting': 40 + 'clientPrefix' : "cc:cc:cc:cc:02:", + 'scenariosPerAPSetting': 60, + 'scenarioBackup' : '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/AP2_scenarios.var' }} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave3.yaml index 21dc9a2..3a59c5a 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave3.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave3.yaml @@ -29,13 +29,15 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22", "cc:cc:cc:cc:cc:23", "cc:cc:cc:cc:cc:24", "cc:cc:cc:cc:cc:25", "cc:cc:cc:cc:cc:26"], - 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:01'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:01'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:01']], + 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:01'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:01']], 'myMAC' : 'aa:aa:aa:aa:aa:03', 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, 'clientconf': 
'/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP3', 'mode': 'generator', # training or working or generator 'numsClients': [5,2,3,2,2], #for generator 'maxNumClients' : 100, - 'scenariosPerAPSetting': 40 + 'scenariosPerAPSetting': 60, + 'clientPrefix' : "cc:cc:cc:cc:03:", + 'scenarioBackup' : '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/AP3_scenarios.var' }} diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index c22312c..7edc8ad 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -351,21 +351,23 @@ def get_interfaces(self): ''' Returns a data structure of all available interfaces in the system It is structured as follows: - { - 'uuid_of_node_1': { + [ + { 'hostname' : 'hostname of node1', 'uuid' : 'uuid of node1', - 'devices' : { - 'name' : 'name of device1', - 'uuid' : 'uuid of device1', - 'interfaces' : [ - 'name of iface1', 'name of iface2' - ] - }, + 'devices' : [ + { + 'name' : 'name of device1', + 'uuid' : 'uuid of device1', + 'interfaces' : [ + 'name of iface1', 'name of iface2' + ] + }, + ], ... }, ... - } + ] fills self.aporder. Map index in uniflex to index in order list fills self.observationOrder. 
Map index in agent to index in uniflex list @@ -375,10 +377,10 @@ def get_interfaces(self): orphanApId = len(self.aporder) self.actionOrder = [] - interfaces = {} + interfaces = [] for node in self.get_nodes(): nodeinfo = {'hostname': node.hostname, 'uuid': node.uuid} - devices = {} + devices = [] for device in node.get_devices(): devinfo = {'name': device.name, 'uuid': device.uuid} interfaces_tmp = [] @@ -398,9 +400,9 @@ def get_interfaces(self): orphanApId += 1 devinfo['interfaces'] = interfaces_tmp - devices[device.uuid] = devinfo + devices.append(devinfo) nodeinfo['devices'] = devices - interfaces[node.uuid] = nodeinfo + interfaces.append(nodeinfo) self.observationOrder = [] print(self.actionOrder) @@ -505,22 +507,23 @@ def reset(self): # set a start channel for each interface: channel = 1 - for nodeUuid, node in interfaces.items(): - for devUuid, device in node['devices'].items(): + for node in interfaces: + for device in node['devices']: for iface in device['interfaces']: self.set_channel( node['uuid'], device['uuid'], iface, channel, None) channel += 5 if channel > 12: channel = 1 - # clear bandwidth counter - if(self.simulation): - self.simulate_flows() - self.get_bandwidth() # fill obsersavion buffer for i in range(self.scenarios): self.get_observation() + # clear bandwidth counter + if(self.simulation): + self.simulate_flows() + self.get_bandwidth() + self.currentScenario = self.scenarios -1 return @@ -532,6 +535,7 @@ def execute_action(self, action): for index, interface in enumerate(self._create_interface_list()): apindex = self.actionOrder[index] ifaceaction = action[apindex] + #self.log.info(str(index) + "List-AP is " + str(apindex) + " registered AP, gets channel " + str(self.availableChannels[int(ifaceaction)]) + ", UUID: " +str(interface['device'])) #ifaceaction = int(action / (pow(len(self.availableChannels),apindex))) #ifaceaction = ifaceaction % len(self.availableChannels) self.set_channel(interface['node'], interface['device'], 
interface['iface'], @@ -619,6 +623,7 @@ def get_reward(self): self.simulate_flows() bandwidthList = self.get_bandwidth() + #self.log.info("Bandwidth: " + str(bandwidthList)) #bandwidth = sorted(bandwidth, key=lambda k: k['mac']) reward = 0 for key in bandwidthList: @@ -663,8 +668,8 @@ def _create_interface_list(self): ''' interfaceList = [] interfaces = self.get_interfaces() - for nodeUuid, node in interfaces.items(): - for devUuid, device in node['devices'].items(): + for node in interfaces: + for device in node['devices']: for iface in device['interfaces']: interfaceList.append({'node': node['uuid'], 'device': device['uuid'], 'iface': iface}) return interfaceList diff --git a/openAI_RRM/config_master_simulation.yaml b/openAI_RRM/config_master_simulation.yaml index 90e51d0..d39683d 100644 --- a/openAI_RRM/config_master_simulation.yaml +++ b/openAI_RRM/config_master_simulation.yaml @@ -21,7 +21,7 @@ control_applications: 'steptime' : 1, 'simulation': True, 'mode': 'training', # training or working - 'scenarios': 5, + 'scenarios': 180, 'order': ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:02','aa:aa:aa:aa:aa:03', 'aa:aa:aa:aa:aa:04', 'aa:aa:aa:aa:aa:05','aa:aa:aa:aa:aa:06', 'aa:aa:aa:aa:aa:07','aa:aa:aa:aa:aa:08'] } diff --git a/openAI_RRM/rrm_agent_evalmodel5_difset_sort_cli-ap_detecttopo.py b/openAI_RRM/rrm_agent_evalmodel5_difset_sort_cli-ap_detecttopo.py index 3de1ea0..a643745 100644 --- a/openAI_RRM/rrm_agent_evalmodel5_difset_sort_cli-ap_detecttopo.py +++ b/openAI_RRM/rrm_agent_evalmodel5_difset_sort_cli-ap_detecttopo.py @@ -193,7 +193,7 @@ def eval_handover(client, new_clients): ac_space = spaces.MultiDiscrete([2,2,2]) ob_space = spaces.Box(low=0, high=6, shape=(ac_space.nvec.shape[0],2), dtype=np.uint32) #trainingfileap = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/controller_3_112neuronalesNetz.train" -trainingfile = 
"/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/training5_1_sort_3set_detecttopo/logs/controller_3_varSetsmallneuronalesNetz.train" +trainingfile = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/Training_120_2_80/logs/controller_3_varSetneuronalesNetz.train" clients = [ {'clients': [1, 1, 2], 'aps': [2,2,2], 'valid':[[1,1,0], [0,0,1]]}, {'clients': [1, 1, 1], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, @@ -257,7 +257,7 @@ def eval_handover(client, new_clients): print("Data: Trained Data of different settings with sorting agent. Experiment 3") model = keras.Sequential() -model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) +model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) #model.add(keras.layers.Dense(5, activation='relu')) model.add(keras.layers.Dense(a_size, activation='softmax')) model.compile(optimizer=tf.train.AdamOptimizer(0.001), diff --git a/openAI_RRM/rrm_agent_evalmodel6_multi.py b/openAI_RRM/rrm_agent_evalmodel6_multi.py new file mode 100644 index 0000000..b874fbf --- /dev/null +++ b/openAI_RRM/rrm_agent_evalmodel6_multi.py @@ -0,0 +1,375 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import gym +import UniFlexGym +import tensorflow as tf +import tensorflow.contrib.slim as slim +import numpy as np +from tensorflow import keras +import argparse +import logging +import time +import csv +import os +from math import * +from scipy.optimize import fsolve +from gym import spaces + +sortedIndecies = [] +ac_space = [] +BANDWITH_ON_CHANNEL = 54e6 +numChannels = 2 +SORT_VALUES = True +topologies = [[1,2,1], [2,2,2], [1,0,1]] +N_test = int(1000) +aps = 3 +maxclients = 100 + +def zeros(anz): + return np.zeros(anz) + +def ones(anz): + return np.ones(anz) + +def calculate_all_best_action(clients, aps): + aps_sort = sorted(aps) + #if string topology + if aps_sort == [1,1,2]: + #set ap in the 
middle to 1, all other to 0 + result = [zeros(3), ones(3)] + result[0][aps.index(2)] = 1 + result[1][aps.index(2)] = 0 + return result + + # if island topology + if aps_sort == [0,1,1]: + #set on of the neighouring aps to 1, all other to 0 + result = [] + group = np.where(np.array(aps) == 1) + for elem in group[0]: + myresult = zeros(3) + myresult[elem] = 1 + result.append(myresult) + myresult = ones(3) + myresult[elem] = 0 + result.append(myresult) + return result + + #if all aps can hear all other + if aps_sort == [2,2,2]: + result = [] + #get ap with most clients + clients_sort = sorted(clients) + ap_most = np.where(clients == clients_sort[2]) + for elem in ap_most[0]: + myresult = zeros(3) + myresult[elem] = 1 + result.append(myresult) + myresult = ones(3) + myresult[elem] = 0 + result.append(myresult) + return result + # there is no topology + print("notopo") + return [zeros(3)] + +#test data consist on a vector of observations and a label vector of all valid +# action to this observations +def generate_testdata(number, dimension, maxclients, topoplogies, sortValues): + data = [] + labels = [] + for i in range(number): + for topology in topologies: + index = np.array(range(dimension), dtype=np.int16) + clients = np.random.randint(maxclients, size=dimension) + mydata = np.vstack((clients, topology, index)).transpose() + labeldata = mydata + #if sortValues: + # labeldata = np.sort(labeldata.view('i4,i4'), order=['f0', 'f1'], axis=0).view(np.int) + clients = labeldata[:,0] + topology = labeldata[:,1].tolist() + #mydata = np.reshape(mydata, [1, 2*dimension]) + mydata = np.delete(mydata, 2, axis=1) + data.append(mydata) + #labels.append(channelvectors_to_label(calculate_all_best_action(clients, topology))) + labels.append(calculate_all_best_action(clients, topology)) + return [data, labels] + +def normalize_state(state, ob_space, s_size): + global sortedIndecies + state = np.array(state) + + #sort states + index = np.arange(state.shape[0]) + index = 
index.reshape((-1,1)) + state = np.concatenate((state, index), axis=1) + # + if SORT_VALUES: + state = np.sort(state.view('i8,i8,i8'), order=['f0', 'f1'], axis=0).view(np.int) + sortedIndecies = state[:,-1] + state = np.delete(state, -1, axis=1) + state = np.reshape(state, [1, s_size]) + obspacehigh = np.reshape(ob_space.high, [1, s_size]) + #state = state *2 / obspacehigh - 1 + state = state -1 + + return state + +def map_action(mappedAction): + action = np.zeros(len(ac_space.nvec)) + for index in range(len(ac_space.nvec)): + # filter action by the index + ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) + ifaceaction = ifaceaction % ac_space.nvec[0] + #print("ifaceaction at " + str(index) + " is " + str(ifaceaction)) + #print("Find " + str(index) + "in sorted indecies" + str(sortedIndecies)+ "at" + str(np.where(sortedIndecies == index))) + #action[np.where(sortedIndecies == index)[0]] = ifaceaction + action[sortedIndecies[index]] = ifaceaction + return action + +def eval(clients): + errorcounter_cli = 0 + errorcounter_ap = 0 + counter = 0 + errorlog = "" + + for client in clients: + ap = client['aps'] + state_cli = np.array([client['clients'], ap]) + #state_ap = np.array([ap, client['clients']]) + + state_cli = state_cli.transpose() + #state_ap = state_ap.transpose() + + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + + #state_ap_norm = normalize_state(state_ap.tolist(), ob_space, s_size) + #actionap = np.argmax(modelap.predict(state_ap_norm)[0]) + #actionvectorap = map_action(actionap) + + success_cli = False + for tmp in client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break + + #success_ap = False + #for tmp in client['valid']: + # tmpval = True + # for a, b in zip(actionvectorap, tmp): + # if a != b: + # tmpval = False + # break + # if 
tmpval: + # success_ap = True + # break + + output = "[Cli, Ap]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + " SortedID: " +str(sortedIndecies) + print(output) + #print("[Ap, Cli]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(actionap) + ", Actionvector" + str(actionvectorap) + ", " + str(success_ap)) + counter += 1 + + #if not success_ap: + # errorcounter_ap +=1 + + if not success_cli: + errorcounter_cli +=1 + errorlog += output +"\n" + + print("Errors in [Cli,Ap]:" + str(errorcounter_cli) + "/" + str(counter) + "(" + str(errorcounter_cli/counter*100) + "%)") + #print("Errors in [Ap,Cli]:" + str(errorcounter_ap) + "/" + str(counter) + "(" + str(errorcounter_ap/counter) + "%)") + print(errorlog) + +def calculate_reward(clients_p_ap, action): + reward = 0 + + for ap in range(len(action)): + channel = action[ap] + + #search num aps on same channel + same_chan = 0 + for act in action: + if act == channel: + same_chan += 1 + + ap_bandwidth = BANDWITH_ON_CHANNEL/ same_chan + reward += clients_p_ap[ap] * sqrt(ap_bandwidth/clients_p_ap[ap]) + return reward + +def get_best_reward(client, ap): + state_cli = np.array([client, ap]) + #state_ap = np.array([ap, client['clients']]) + + state_cli = state_cli.transpose() + #state_ap = state_ap.transpose() + + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + + reward = calculate_reward(client, actionvector) + return reward + +def eval_handover(client, new_clients): + print("Current state:") + ap = client['aps'] + state_cli = np.array([client['clients'], ap]) + + state_cli = state_cli.transpose() + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + + success_cli = False + for tmp in 
client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break + + reward = get_best_reward(client['clients'], ap) + + print("Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) + + print("Handover simulation") + for new_client in new_clients: + ap = new_client['aps'] + state_cli = np.array([new_client['clients'], ap]) + + state_cli = state_cli.transpose() + state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) + action = np.argmax(model.predict(state_cli_norm)[0]) + actionvector = map_action(action) + reward = calculate_reward(new_client['clients'], actionvector) + + success_cli = False + for tmp in new_client['valid']: + tmpval = True + for a, b in zip(actionvector, tmp): + if a != b: + tmpval = False + break + if tmpval: + success_cli = True + break + + print("Cli:" + str(new_client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) + + +ac_space = spaces.MultiDiscrete([2,2,2]) +ob_space = spaces.Box(low=0, high=6, shape=(ac_space.nvec.shape[0],2), dtype=np.uint32) +#trainingfileap = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/controller_3_112neuronalesNetz.train" +trainingfile = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/controller_3_varSetneuronalesNetz.train" + +#generate random test data +[test_data, test_labels] = generate_testdata(N_test, aps, maxclients, topologies, SORT_VALUES) + +clients = [] +for elem, label in zip(test_data, test_labels): + clients.append({'clients': elem[:,0], 'aps': elem[:,1], 'valid':label}) + +#clients = [ {'clients': [1, 1, 2], 'aps': [2,2,2], 'valid':[[1,1,0], [0,0,1]]}, +# 
{'clients': [1, 1, 1], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, +# {'clients': [1, 1, 5], 'aps': [2,2,2], 'valid':[[1,1,0], [0,0,1]]}, +# {'clients': [1, 3, 2], 'aps': [2,2,2], 'valid':[[0,1,0], [1,0,1]]}, +# {'clients': [5, 3, 4], 'aps': [2,2,2], 'valid':[[1,0,0], [0,1,1]]}, +# {'clients': [5, 1, 3], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, +# {'clients': [2, 4, 2], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, +# {'clients': [7, 1, 5], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, +# {'clients': [4, 3, 2], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]}, +# {'clients': [1, 1, 1], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]}, +# {'clients': [1, 3, 2], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]} +# ] +#clients2 = [{'clients': [1, 1, 2], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [1, 2, 3], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [6, 0, 0], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [1, 5, 1], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [5, 1, 1], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [2, 2, 2], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [5, 5, 1], 'valid':[[1,0,1], [0,1,0]]} +# ] +handover = [{'clients': [1, 5, 1], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [2, 4, 1], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [1, 4, 2], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]} + ] + +handover2 = [{'clients': [1, 5, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [2, 4, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [1, 4, 2], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]} + ] + +#handover2 = [{'clients': [1, 2, 1], 'valid':[[1,0,1], [0,1,0]]}, +# {'clients': [2, 1, 1], 'valid':[[0,1,1], [1,0,0]]}, +# {'clients': [1, 1, 2], 'valid':[[1,1,0], [0,0,1]]} +# ] + +handover3 = [{'clients': [2, 2, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0], [1,0,0], [0,1,1]]}, + {'clients': [2, 1, 2], 'aps': [1,2,1], 'valid':[[0,1,1], [1,0,0], [1,1,0], [0,0,1]]}, + 
{'clients': [1, 2, 2], 'aps': [1,2,1], 'valid':[[1,1,0], [0,0,1], [1,0,1], [0,1,0]]} + ] + +#aps = [[2,2,2]] +#aps2 = [[1,2,1]] + +#states = [[[1,2],[1,2],[2,2]], [[1,2],[2,2],[3,2]], [[6,2],[0,2],[0,2]], [[1,2],[5,2],[1,2]], [[2,2],[2,2],[2,2]], +# [[1,1],[1,0],[2,1]], [[1,1],[2,1],[3,0]], [[6,1],[0,0],[0,1]], [[1,1],[5,1],[1,0]], [[2,0],[2,1],[2,1]], +# [[2,1],[2,2],[2,1]], [[3,1],[2,2],[2,1]]] +#states = [[[2,1],[2,1],[2,2]], [[2,1],[2,2],[2,3]], [[2,6],[2,0],[2,0]], [[2,1],[2,5],[2,1]], [[2,2],[2,2],[2,2]], +# [[0,1],[1,1],[1,2]], [[1,1],[0,2],[1,3]], [[1,6],[1,0],[0,0]], [[1,1],[1,5],[0,1]], [[1,2],[0,2],[1,2]], +# [[1,2],[2,2],[1,2]], [[1,3],[2,2],[1,2]]] + +print("Observation space: ", ob_space, ob_space.dtype) +print("Action space: ", ac_space, ac_space.nvec) + +tmps_size = ob_space.shape +s_size = tmps_size[0] * tmps_size[1] +#s_size = list(map(lambda x: x * ob_space.high, s_size)) +a_size = pow(ac_space.nvec[0], ac_space.nvec.shape[0]) + +print("observation_space size:" + str(s_size)) +print("Data: Trained Data of different settings with sorting agent. 
Experiment 3") + +model = keras.Sequential() +model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) +#model.add(keras.layers.Dense(5, activation='relu')) +model.add(keras.layers.Dense(a_size, activation='softmax')) +model.compile(optimizer=tf.train.AdamOptimizer(0.001), + loss='categorical_crossentropy', + metrics=['accuracy']) +model.load_weights(trainingfile) + +#modelap = keras.Sequential() +#modelap.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) +#modelap.add(keras.layers.Dense(5, activation='relu')) +#modelap.add(keras.layers.Dense(a_size, activation='softmax')) +#modelap.compile(optimizer=tf.train.AdamOptimizer(0.001), +# loss='categorical_crossentropy', +# metrics=['accuracy']) +#modelap.load_weights(trainingfileap) + +print("\nSame domain:") +eval(clients) + +#print("\nMan in the middle:") +#eval(clients2, aps2) + +print("\nHandover test") +eval_handover(handover[0], handover[1:]) + +print("\nHandover test 2") +eval_handover(handover2[0], handover2[1:]) + +print("\nHandover test 3") +eval_handover(handover3[0], handover3[1:]) diff --git a/openAI_RRM/rrm_agent_multi.py b/openAI_RRM/rrm_agent_multi.py new file mode 100644 index 0000000..014b1be --- /dev/null +++ b/openAI_RRM/rrm_agent_multi.py @@ -0,0 +1,467 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import gym +import UniFlexGym +import tensorflow as tf +import tensorflow.contrib.slim as slim +import numpy as np +from tensorflow import keras +import argparse +import logging +import time +import csv +import os +import json +from math import * +from scipy.optimize import fsolve +import pickle +import datetime +from functools import reduce + +AVGTIME_ONEVALUE_RAND = 2 +RANDVALUE_FIRST_EPISODE = 0.9 +REWARD_INIT = 0.00001 +SORT_VALUES = True +SCENARIOS = 180 +EPSILON_MAX_DECAY = 0.95 +EPSILON_MIN = 0.01 +ACTIVATE_OBSERVER = False + +sortedIndecies = [] +ac_space = [] +currentScenario = 0 + +def normalize_state(state, ob_space, s_size): + global 
sortedIndecies + state = np.array(state) + #sort states + index = np.arange(state.shape[0]) + index = index.reshape((-1,1)) + state = np.concatenate((state, index), axis=1) + #sort input and output if configured + if SORT_VALUES: + state = np.sort(state.view('i8,i8,i8'), order=['f0', 'f1'], axis=0).view(np.int) + #print("state" + str(state)) + sortedIndecies = state[:,-1] + #print(sortedIndecies) + state = np.delete(state, -1, axis=1) + + state = np.reshape(state, [1, s_size]) + # obspacehigh = np.reshape(ob_space.high, [1, s_size]) + state = state - 1 #*2 / obspacehigh - 1 + + return state + +def guess_random_numbers_in_firstEpisode(a_size): + return AVGTIME_ONEVALUE_RAND * a_size * SCENARIOS#**2 + +def guess_steps(a_size): + stepidea = guess_random_numbers_in_firstEpisode(a_size) / RANDVALUE_FIRST_EPISODE + #scale to multiple of scenario + stepidea = int(stepidea / SCENARIOS) * SCENARIOS + return stepidea + +def guess_epsilon_decay(steps, a_size): + func = lambda epsilon_decay: guess_random_numbers_in_firstEpisode(a_size) - (1-epsilon_decay**(steps + 1)) / (1 - epsilon_decay) + return fsolve(func, 0.9999999999)[0] + +def map_action(mappedAction): + action = np.zeros(len(ac_space.nvec)) + for index in range(len(ac_space.nvec)): + # filter action by the index + ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) + ifaceaction = ifaceaction % ac_space.nvec[0] + #print("ifaceaction at " + str(index) + " is " + str(ifaceaction)) + #print("Find " + str(index) + "in sorted indecies" + str(sortedIndecies)+ "at" + str(np.where(sortedIndecies == index))) + #action[np.where(sortedIndecies == index)[0]] = ifaceaction + action[sortedIndecies[index]] = ifaceaction + return action + +def reset_rewards(): + global maxreward + global minreward; + for i in range(SCENARIOS): + maxreward[i] = REWARD_INIT + minreward[i] = np.inf + return + +def normalize_reward(reward, rewardpow, action): + global maxreward + global minreward; + global lastreward; + global 
currentScenario; + + orig = reward + + minreward[currentScenario] = min(reward, minreward[currentScenario]) + reward -= minreward[currentScenario] + + maxreward[currentScenario] = max(reward, maxreward[currentScenario]) + reward /= maxreward[currentScenario] + + print("reward:" + str(orig) + ", minreward:" + str(minreward[currentScenario]) + ", maxreward:" +str(maxreward[currentScenario]) + ", at scenario" + str(currentScenario)) + + #set reward to 1.0 if it is first value + if maxreward[currentScenario] == REWARD_INIT: + reward = 1.0 + + reward = pow(reward, rewardpow) + + #hysteresis + if action != lastaction[currentScenario] and abs(reward - lastreward[currentScenario]) < 0.1: + reward *= 0.9 + lastaction[currentScenario] = action + lastreward[currentScenario] = reward + + return reward + +lastreward = np.zeros(SCENARIOS) +minreward = np.zeros(SCENARIOS) +maxreward = np.zeros(SCENARIOS) +lastaction = np.zeros(SCENARIOS) + + +parser = argparse.ArgumentParser(description='Uniflex reader') +parser.add_argument('--config', help='path to the uniflex config file', default=None) +parser.add_argument('--output', help='path to a csv file for agent output data', default=None) +parser.add_argument('--plot', help='activate plotting', default=None) +parser.add_argument('--steptime', help='interval between two steps', default=1) +#parser.add_argument('--steps', help='number of steps per episode. If not set, the agents runs infinitly long', default=None) +parser.add_argument('--episodes', help='number of episodes in this execution. 
If not set, the agents runs infinitly long', default=None) +parser.add_argument('--startepisode', help='The episode we start with', default=1) +parser.add_argument('--trainingfile', help='file to load and store training data', default=None) +parser.add_argument('--cpus', help='Numbers of cpus for this process', default=1) + +args = parser.parse_args() +if not args.config: + print("No config file specified!") + os._exit(1) +if not args.output: + print("No output file specified! - Skip data") +if not args.trainingfile: + print("No training file specified! - Start with unlearned agent") + +if args.plot: + import matplotlib.pyplot as plt + +print("Start at episode " + str(args.startepisode)) + +#create uniflex environment, steptime is 10sec +env = gym.make('uniflex-v0') +#env.configure() +env.start_controller(steptime=float(args.steptime), config=args.config) + +epsilon_max = 1.0 # exploration rate +#epsilon_decay = 0.99 +#epsilon_decay = 0.995 + +time_history = [] +rew_history = [] + +numChannels = 2 + +observerData = [] +observerCounter = 0 + +while True: + + state = env.reset() + currentScenario = 0 + + n = 0 + ac_space = env.action_space + ob_space = env.observation_space + + print("reset agent") + print("Observation space: ", ob_space, ob_space.dtype) + print("Action space: ", ac_space, ac_space.nvec) + + tmps_size = ob_space.shape + s_size = tmps_size[0] * tmps_size[1] + #s_size = list(map(lambda x: x * ob_space.high, s_size)) + a_size = pow(ac_space.nvec[0], ac_space.nvec.shape[0]) + + if a_size == 0: + print("there is no vaild AP - sleep 2 seconds") + time.sleep(2) + continue + + print("observation_space size:" + str(s_size)) + + state = normalize_state(state, ob_space, s_size) + + model = keras.Sequential() + model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) + #model.add(keras.layers.Dense(5, activation='relu')) + model.add(keras.layers.Dense(a_size, activation='softmax')) + model.compile(optimizer=tf.train.AdamOptimizer(0.001), + 
loss='categorical_crossentropy', + metrics=['accuracy']) + + config = tf.ConfigProto() + config.intra_op_parallelism_threads = int(args.cpus) + config.inter_op_parallelism_threads = int(args.cpus) + tf.Session(config=config) + + print("State (Observation) of System" + str(state)) + try: + state = np.reshape(state, [1, s_size]) + except ValueError: + continue + rewardsum = 0 + + steps = guess_steps(a_size) + epsilon_decay = guess_epsilon_decay(steps, a_size) + print("Initialize agent. Exploration rate is " + str(epsilon_decay) + + ", an episode has at most " + str(steps) + " steps") + + rewardpow = int(log(a_size, 2)) + + episode = 1 + reset_rewards() + + if args.trainingfile and not os.path.isfile(args.trainingfile): + try: + model.load_weights(args.trainingfile) + print("Load model") + except ValueError: + print("Spaces does not match") + except tf.errors.NotFoundError: + print("File not found. Skip loading") + + try: + with open(args.trainingfile + '.var', 'r') as f: # Python 3: open(..., 'wb') + temp = json.loads(f.read()) + lastreward = np.array(temp['lastreward']) + minreward = np.array(temp['minreward']) + maxreward = np.array(temp['maxreward']) + lastaction = np.array(temp['lastaction']) + print("Load reward values of last run") + print("lastreward: " + str(lastreward)) + print("minreward: " + str(minreward)) + print("maxreward: " + str(maxreward)) + print("lastaction: " + str(lastaction)) + except ValueError as e: + print("File format is wrong" + str(e)) + except IOError as e: + print("File not found. 
Skip loading" + str(e)) + + + while episode < int(args.startepisode): + epsilon_max *= EPSILON_MAX_DECAY + epsilon_max = max(epsilon_max, EPSILON_MIN)#max(pow(epsilon_max, 3), EPSILON_MIN) + episode += 1 + + # Schleife für Episoden + while True: + print("start episode") + + run = 0 + runs = [] + rewards = [] + actions = [] + + epsilon = epsilon_max + epsilon_max *= EPSILON_MAX_DECAY + epsilon_max = max(epsilon_max, EPSILON_MIN)#max(pow(epsilon_max, 3), EPSILON_MIN) + done = False + + aps = int(log(a_size, numChannels)) + + #for i in range(0, aps): + # actions.append([]) + + state = env.reset() + state_orig = state + currentScenario = 0 + state = normalize_state(state, ob_space, s_size) + + while not done: + # Choose action + ts = time.time() + print("\nnew step at " + datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S,%f')) + print ("Run: " + str(run) + ", Episode: " + str(episode) + ", Scenario: " + str(currentScenario)) + print("Observation:" + str(state_orig)) + + if np.random.rand(1) < epsilon: + action = np.random.randint(a_size) + else: + action = np.argmax(model.predict(state)[0]) + + actionvector = map_action(action) + + print("Action:" +str(action) + ", Actionvector" + str(actionvector)) + + # Step + next_state, reward, done, _ = env.step(actionvector) + + reward = normalize_reward(reward, rewardpow, action) + + + if done: + # print("episode: {}/{}, time: {}, rew: {}, eps: {:.2}" + # .format(e, total_episodes, time, rewardsum, epsilon)) + reset_rewards() + print("setting changes") + break + + state_orig = next_state + + next_state = normalize_state(next_state, ob_space, s_size) + + # Train + target = reward + if not done: + target = (reward)# + 0.95 * np.amax(model.predict(next_state)[0])) + + print("Scaled reward: " + str(target)) + + target_f = model.predict(state) + print("agent learning" + str(target_f)) + target_f[0][action] = target + print("agent new learning" + str(target_f)) + history = model.fit(state, target_f, epochs=1, verbose=0) 
+ + # observer: Observe states in neural network + # if there is no change of the loss function within 10 steps and epsilon_max < 0.5 + # then stop the execution. It detects changes within 0.01 + if ACTIVATE_OBSERVER: + observerData.append(history.history['loss'][0]) + observerCounter += 1 + if(observerCounter > 10): + observerCounter = 10 + observerData.pop(0) + avg = reduce(lambda x, y: x + y, observerData) / len(observerData) + indata = list(map(lambda x: abs(x-avg) < 0.01, observerData)) + complete = reduce(lambda x, y: x and y, indata) + if complete and epsilon_max < 0.5: + print("Accuracy: " + str(history.history['acc'][0])) + print("Network is trained - exit") + os._exit(0) + + #rewardsum += reward + if epsilon > EPSILON_MIN: epsilon *= epsilon_decay + + #rewards.append(reward) + + if args.output: + with open(args.output, 'a') as csvFile: + writer = csv.writer(csvFile) + writer.writerow([reward, action, episode,currentScenario]) + csvFile.close() + + #for ap in range(0, aps): + # ifaceaction = int(action / (pow(numChannels, ap))) + # ifaceaction = ifaceaction % numChannels + # actions[ap].append(ifaceaction) + + print ("Reward: " + str(reward)) + print ("GameOver: " + str(done)) + #print ("State: " + str(state)) + #print ("Channel selection:" + str(action)) + + state = next_state + + if args.plot: + plt.subplot(211) + plt.plot(run, reward, 'bo') # Additional point + plt.ylabel('reward') + plt.subplot(212) + #for ap in range(0, aps): + # plt.plot(actions[ap]) + plt.plot(run, action, 'bo') # Additional point + plt.ylabel('action') + plt.xlabel('step') + plt.pause(0.05) + + currentScenario += 1 + if currentScenario >= SCENARIOS: + currentScenario = 0 + + run += 1 + + # next episode if enough steps, if enough episodes -> exit + # store model and internal states on change of episode + if steps <= run: + if args.trainingfile: + model.save_weights(args.trainingfile) + with open(args.trainingfile + '.var', 'w') as f: # Python 3: open(..., 'wb') + 
f.write(json.dumps({ + 'lastreward' : lastreward.tolist(), + 'minreward' : minreward.tolist(), + 'maxreward' : maxreward.tolist(), + 'lastaction' : lastaction.tolist() + })) + if args.episodes and int(args.episodes) <= episode: + os._exit(1) + else: + break + + episode += 1 + + +''' +ob_space = env.observation_space +ac_space = env.action_space +print("Observation space: ", ob_space, ob_space.dtype) +print("Action space: ", ac_space, ac_space.n) + +s_size = ob_space.shape[0] +a_size = ac_space.n +model = keras.Sequential() +model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) +model.add(keras.layers.Dense(a_size, activation='softmax')) +model.compile(optimizer=tf.train.AdamOptimizer(0.001), + loss='categorical_crossentropy', + metrics=['accuracy']) + +total_episodes = 200 +max_env_steps = 100 +env._max_episode_steps = max_env_steps + +epsilon = 1.0 # exploration rate +EPSILON_MIN = 0.01 +epsilon_decay = 0.999 + +time_history = [] +rew_history = [] + +for e in range(total_episodes): + + state = env.reset() + state = np.reshape(state, [1, s_size]) + rewardsum = 0 + for time in range(max_env_steps): + # Choose action + if np.random.rand(1) < epsilon: + action = np.random.randint(a_size) + else: + action = np.argmax(model.predict(state)[0]) + + # Step + next_state, reward, done, _ = env.step(action) + + if done: + print("episode: {}/{}, time: {}, rew: {}, eps: {:.2}" + .format(e, total_episodes, time, rewardsum, epsilon)) + break + + next_state = np.reshape(next_state, [1, s_size]) + + # Train + target = reward + if not done: + target = (reward + 0.95 * np.amax(model.predict(next_state)[0])) + + target_f = model.predict(state) + target_f[0][action] = target + model.fit(state, target_f, epochs=1, verbose=0) + + state = next_state + rewardsum += reward + if epsilon > EPSILON_MIN: epsilon *= epsilon_decay + + time_history.append(time) + rew_history.append(rewardsum) +''' From a2d3e4cea72b339f4ee8a169a7b13088cbe9579f Mon Sep 17 00:00:00 2001 
From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Mon, 29 Jul 2019 10:08:31 +0200 Subject: [PATCH 47/54] clean up --- .../Setting2_2/config_slave.yaml | 4 +- .../Setting2_2/config_slave2.yaml | 4 +- .../Setting2_2unsym/config_slave.yaml | 4 +- .../Setting2_2unsym/config_slave2.yaml | 4 +- .../Setting3_112/config_slave.yaml | 7 +- .../Setting3_112/config_slave2.yaml | 7 +- .../Setting3_112/config_slave3.yaml | 7 +- .../Setting3_222/config_slave.yaml | 6 +- .../Setting3_222/config_slave2.yaml | 7 +- .../Setting3_222/config_slave3.yaml | 8 +- .../Setting8_22213122/config_slave.yaml | 4 +- .../Setting8_22213122/config_slave2.yaml | 4 +- .../Setting8_22213122/config_slave3.yaml | 4 +- .../Setting8_22213122/config_slave4.yaml | 4 +- .../Setting8_22213122/config_slave5.yaml | 4 +- .../Setting8_22213122/config_slave6.yaml | 4 +- .../Setting8_22213122/config_slave7.yaml | 4 +- .../Setting8_22213122/config_slave8.yaml | 4 +- openAI_RRM/channel_controller.py | 4 +- openAI_RRM/rrm_agent.py | 70 ------------ openAI_RRM/rrm_agent_multi.py | 66 ----------- openAI_RRM/thompson_agent.py | 106 +++--------------- 22 files changed, 62 insertions(+), 274 deletions(-) diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml index 88ebedd..02927a2 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml @@ -29,7 +29,7 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02"], - 'neighbors' : ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], + 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03']], 'myMAC' : 'aa:aa:aa:aa:aa:01', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, 'mode': 'single'}} 
diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml index 4f59241..e0a1363 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml @@ -29,7 +29,7 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12"], - 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], + 'neighbors' : [['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03']], 'myMAC' : 'aa:aa:aa:aa:aa:02', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, 'mode': 'single'}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml index 37610b2..6bedbd9 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml @@ -29,9 +29,9 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02"], - 'neighbors' : ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], + 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03']], 'myMAC' : 'aa:aa:aa:aa:aa:01', 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault': 54000000, 'channelThroughput':[54000000, 54000000, 54000000, 54000000, 26000000], - 'txBytesRandom':0}} + 'txBytesRandom':0, 'clientnum':2, 'mode': 'single'}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml index 84870f7..1e3f80f 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml +++ 
b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml @@ -29,9 +29,9 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:cc:11"], - 'neighbors' : ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], + 'neighbors' : [['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03']], 'myMAC' : 'aa:aa:aa:aa:aa:02', 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault': 54000000, 'channelThroughput':[54000000, 54000000, 54000000, 54000000, 26000000], - 'txBytesRandom':0}} + 'txBytesRandom':0, 'clientnum':2, 'mode': 'single'}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml index a326d49..febc7c5 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml @@ -29,10 +29,9 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02", "cc:cc:cc:cc:cc:03", "cc:cc:cc:cc:cc:04", "cc:cc:cc:cc:cc:05", "cc:cc:cc:cc:cc:06"], - 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03']], + 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03']], 'myMAC' : 'aa:aa:aa:aa:aa:01', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP1', - 'mode': 'training', # training or working - 'numsClients': [6,1,2,1,1] + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, + 'mode': 'single' }} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml 
b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml index 06bbaed..a06c215 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml @@ -29,10 +29,9 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12", "cc:cc:cc:cc:cc:13", "cc:cc:cc:cc:cc:14", "cc:cc:cc:cc:cc:15", "cc:cc:cc:cc:cc:16"], - 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03']], + 'neighbors' : [['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03']], 'myMAC' : 'aa:aa:aa:aa:aa:02', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP2', - 'mode': 'training', # training or working - 'numsClients': [0,1,2,1,2] + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, + 'mode': 'single' }} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml index a903689..1b969d3 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml @@ -29,10 +29,9 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22", "cc:cc:cc:cc:cc:23", "cc:cc:cc:cc:cc:24", "cc:cc:cc:cc:cc:25", "cc:cc:cc:cc:cc:26"], - 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 
'aa:aa:aa:aa:aa:03']], + 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:01']], 'myMAC' : 'aa:aa:aa:aa:aa:03', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP3', - 'mode': 'training', # training or working - 'numsClients': [0,5,2,2,3] + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, + 'mode': 'single' # training or working }} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml index b494c57..8922509 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml @@ -29,9 +29,9 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02", "cc:cc:cc:cc:cc:03", "cc:cc:cc:cc:cc:04", "cc:cc:cc:cc:cc:05", "cc:cc:cc:cc:cc:06"], - 'neighbors' : [['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02']], + 'neighbors' : [['aa:aa:aa:aa:aa:02']], 'myMAC' : 'aa:aa:aa:aa:aa:01', 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, - 'mode': 'training', # training or working - 'numsClients': [1,1,5,1,2]}} + 'mode': 'singel', # training or working + 'clientnum': 2}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml index e6ad37c..9994b38 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml @@ -29,10 +29,11 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", 
"cc:cc:cc:cc:cc:12", "cc:cc:cc:cc:cc:13", "cc:cc:cc:cc:cc:14", "cc:cc:cc:cc:cc:15", "cc:cc:cc:cc:cc:16"], - 'neighbors' : [['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03']], + 'neighbors' : [['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03']], 'myMAC' : 'aa:aa:aa:aa:aa:02', 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, - 'mode': 'training', # training or working - 'numsClients': [5,2,1,1,2] + , + 'mode': 'singel', # training or working + 'clientnum': 2 }} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml index 45bf91c..db7eff0 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml @@ -29,10 +29,10 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22", "cc:cc:cc:cc:cc:23", "cc:cc:cc:cc:cc:24", "cc:cc:cc:cc:cc:25", "cc:cc:cc:cc:cc:26"], - 'neighbors' : [['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02']], + 'neighbors' : [['aa:aa:aa:aa:aa:02']], 'myMAC' : 'aa:aa:aa:aa:aa:03', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, - 'mode': 'training', # training or working - 'numsClients': [1,3,5,4,2] + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, + 'mode': 'singel', # training or working + 'clientnum': 2 }} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave.yaml index f20b051..a4f43f5 100644 --- 
a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave.yaml @@ -29,7 +29,7 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:01:01", "cc:cc:cc:cc:01:02"], - 'neighbors' : ['aa:aa:aa:aa:aa:02','aa:aa:aa:aa:aa:04'], + 'neighbors' : [['aa:aa:aa:aa:aa:02','aa:aa:aa:aa:aa:04']], 'myMAC' : 'aa:aa:aa:aa:aa:01', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, 'mode': 'single'}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave2.yaml index bf9c7bc..6130370 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave2.yaml @@ -29,7 +29,7 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:02:01", "cc:cc:cc:cc:02:02"], - 'neighbors' : ['aa:aa:aa:aa:aa:01','aa:aa:aa:aa:aa:03','aa:aa:aa:aa:aa:05'], + 'neighbors' : [['aa:aa:aa:aa:aa:01','aa:aa:aa:aa:aa:03','aa:aa:aa:aa:aa:05']], 'myMAC' : 'aa:aa:aa:aa:aa:02', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, 'mode': 'single'}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave3.yaml index 6fa666c..ea4a5ab 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave3.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave3.yaml @@ -29,7 +29,7 @@ modules: class_name : SimpleModule4 devices : ['phy0'] 
kwargs : { "clients" : ["cc:cc:cc:cc:03:01", "cc:cc:cc:cc:03:02"], - 'neighbors' : ['aa:aa:aa:aa:aa:02','aa:aa:aa:aa:aa:06'], + 'neighbors' : [['aa:aa:aa:aa:aa:02','aa:aa:aa:aa:aa:06']], 'myMAC' : 'aa:aa:aa:aa:aa:03', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, 'mode': 'single'}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave4.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave4.yaml index 07c1c6d..b15a2ba 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave4.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave4.yaml @@ -29,7 +29,7 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:04:01"], - 'neighbors' : ['aa:aa:aa:aa:aa:01','aa:aa:aa:aa:aa:05','aa:aa:aa:aa:aa:07'], + 'neighbors' : [['aa:aa:aa:aa:aa:01','aa:aa:aa:aa:aa:05','aa:aa:aa:aa:aa:07']], 'myMAC' : 'aa:aa:aa:aa:aa:04', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'mode': 'single'}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave5.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave5.yaml index b936e5b..8a49183 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave5.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave5.yaml @@ -29,7 +29,7 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:05:01", "cc:cc:cc:cc:05:02", "cc:cc:cc:cc:05:03"], - 'neighbors' : ['aa:aa:aa:aa:aa:02','aa:aa:aa:aa:aa:04','aa:aa:aa:aa:aa:06','aa:aa:aa:aa:aa:08'], + 'neighbors' : 
[['aa:aa:aa:aa:aa:02','aa:aa:aa:aa:aa:04','aa:aa:aa:aa:aa:06','aa:aa:aa:aa:aa:08']], 'myMAC' : 'aa:aa:aa:aa:aa:05', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':3, 'mode': 'single'}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave6.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave6.yaml index 33091ee..4258694 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave6.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave6.yaml @@ -29,7 +29,7 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:06:01"], - 'neighbors' : ['aa:aa:aa:aa:aa:03','aa:aa:aa:aa:aa:05','aa:aa:aa:aa:aa:08'], + 'neighbors' : [['aa:aa:aa:aa:aa:03','aa:aa:aa:aa:aa:05','aa:aa:aa:aa:aa:08']], 'myMAC' : 'aa:aa:aa:aa:aa:06', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'mode': 'single'}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave7.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave7.yaml index be6e611..f29af52 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave7.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave7.yaml @@ -29,7 +29,7 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:07:01", "cc:cc:cc:cc:07:02"], - 'neighbors' : ['aa:aa:aa:aa:aa:04', 'aa:aa:aa:aa:aa:08'], + 'neighbors' : [['aa:aa:aa:aa:aa:04', 'aa:aa:aa:aa:aa:08']], 'myMAC' : 'aa:aa:aa:aa:aa:07', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 
'channelThroughput':[54000000, 54000000, 54000000, 54000000, 26000000]}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'channelThroughput':[54000000, 54000000, 54000000, 54000000, 26000000], 'clientnum':2, 'mode': 'single'}} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave8.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave8.yaml index 105c16e..6d830b0 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave8.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave8.yaml @@ -29,7 +29,7 @@ modules: class_name : SimpleModule4 devices : ['phy0'] kwargs : { "clients" : ["cc:cc:cc:cc:08:01", "cc:cc:cc:cc:08:02"], - 'neighbors' : ['aa:aa:aa:aa:aa:05','aa:aa:aa:aa:aa:06','aa:aa:aa:aa:aa:07'], + 'neighbors' : [['aa:aa:aa:aa:aa:05','aa:aa:aa:aa:aa:06','aa:aa:aa:aa:aa:07']], 'myMAC' : 'aa:aa:aa:aa:aa:08', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0}} + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, 'mode': 'single'}} diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index 7edc8ad..822c782 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -512,8 +512,8 @@ def reset(self): for iface in device['interfaces']: self.set_channel( node['uuid'], device['uuid'], iface, channel, None) - channel += 5 - if channel > 12: + channel += 6 + if channel > 14: channel = 1 # fill obsersavion buffer diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rrm_agent.py index 23bc31d..fb39828 100644 --- a/openAI_RRM/rrm_agent.py +++ b/openAI_RRM/rrm_agent.py @@ -146,10 +146,6 @@ def normalize_reward(reward, rewardpow, action): epsilon_max = 1.0 # exploration rate epsilon_min = 0.01 #epsilon_decay = 0.99 -epsilon_decay = 0.995 - -time_history = [] -rew_history = [] numChannels = 
2 @@ -356,69 +352,3 @@ def normalize_reward(reward, rewardpow, action): break episode += 1 - - -''' -ob_space = env.observation_space -ac_space = env.action_space -print("Observation space: ", ob_space, ob_space.dtype) -print("Action space: ", ac_space, ac_space.n) - -s_size = ob_space.shape[0] -a_size = ac_space.n -model = keras.Sequential() -model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) -model.add(keras.layers.Dense(a_size, activation='softmax')) -model.compile(optimizer=tf.train.AdamOptimizer(0.001), - loss='categorical_crossentropy', - metrics=['accuracy']) - -total_episodes = 200 -max_env_steps = 100 -env._max_episode_steps = max_env_steps - -epsilon = 1.0 # exploration rate -epsilon_min = 0.01 -epsilon_decay = 0.999 - -time_history = [] -rew_history = [] - -for e in range(total_episodes): - - state = env.reset() - state = np.reshape(state, [1, s_size]) - rewardsum = 0 - for time in range(max_env_steps): - # Choose action - if np.random.rand(1) < epsilon: - action = np.random.randint(a_size) - else: - action = np.argmax(model.predict(state)[0]) - - # Step - next_state, reward, done, _ = env.step(action) - - if done: - print("episode: {}/{}, time: {}, rew: {}, eps: {:.2}" - .format(e, total_episodes, time, rewardsum, epsilon)) - break - - next_state = np.reshape(next_state, [1, s_size]) - - # Train - target = reward - if not done: - target = (reward + 0.95 * np.amax(model.predict(next_state)[0])) - - target_f = model.predict(state) - target_f[0][action] = target - model.fit(state, target_f, epochs=1, verbose=0) - - state = next_state - rewardsum += reward - if epsilon > epsilon_min: epsilon *= epsilon_decay - - time_history.append(time) - rew_history.append(rewardsum) -''' diff --git a/openAI_RRM/rrm_agent_multi.py b/openAI_RRM/rrm_agent_multi.py index 014b1be..84e6c94 100644 --- a/openAI_RRM/rrm_agent_multi.py +++ b/openAI_RRM/rrm_agent_multi.py @@ -399,69 +399,3 @@ def normalize_reward(reward, rewardpow, action): break 
episode += 1 - - -''' -ob_space = env.observation_space -ac_space = env.action_space -print("Observation space: ", ob_space, ob_space.dtype) -print("Action space: ", ac_space, ac_space.n) - -s_size = ob_space.shape[0] -a_size = ac_space.n -model = keras.Sequential() -model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) -model.add(keras.layers.Dense(a_size, activation='softmax')) -model.compile(optimizer=tf.train.AdamOptimizer(0.001), - loss='categorical_crossentropy', - metrics=['accuracy']) - -total_episodes = 200 -max_env_steps = 100 -env._max_episode_steps = max_env_steps - -epsilon = 1.0 # exploration rate -EPSILON_MIN = 0.01 -epsilon_decay = 0.999 - -time_history = [] -rew_history = [] - -for e in range(total_episodes): - - state = env.reset() - state = np.reshape(state, [1, s_size]) - rewardsum = 0 - for time in range(max_env_steps): - # Choose action - if np.random.rand(1) < epsilon: - action = np.random.randint(a_size) - else: - action = np.argmax(model.predict(state)[0]) - - # Step - next_state, reward, done, _ = env.step(action) - - if done: - print("episode: {}/{}, time: {}, rew: {}, eps: {:.2}" - .format(e, total_episodes, time, rewardsum, epsilon)) - break - - next_state = np.reshape(next_state, [1, s_size]) - - # Train - target = reward - if not done: - target = (reward + 0.95 * np.amax(model.predict(next_state)[0])) - - target_f = model.predict(state) - target_f[0][action] = target - model.fit(state, target_f, epochs=1, verbose=0) - - state = next_state - rewardsum += reward - if epsilon > EPSILON_MIN: epsilon *= epsilon_decay - - time_history.append(time) - rew_history.append(rewardsum) -''' diff --git a/openAI_RRM/thompson_agent.py b/openAI_RRM/thompson_agent.py index 68ba3e7..0bfb710 100644 --- a/openAI_RRM/thompson_agent.py +++ b/openAI_RRM/thompson_agent.py @@ -32,28 +32,28 @@ if args.plot: import matplotlib.pyplot as plt +ac_space = [] -#create uniflex environment, steptime is 10sec +def map_action(mappedAction): + 
action = np.zeros(len(ac_space.nvec)) + for index in range(len(ac_space.nvec)): + # filter action by the index + ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) + ifaceaction = ifaceaction % ac_space.nvec[0] + action[index] = ifaceaction + return action + + +#create uniflex environment env = gym.make('uniflex-v0') #env.configure() env.start_controller(steptime=float(args.steptime), config=args.config) -epsilon = 1.0 # exploration rate -epsilon_min = 0.01 -#epsilon_decay = 0.99 -epsilon_decay = 0.995 - -time_history = [] -rew_history = [] - numChannels = 2 episode = 1 while True: run = 0 - runs = [] - rewards = [] - actions = [] state = env.reset() n = 0 @@ -62,7 +62,7 @@ print("Observation space: ", ob_space, ob_space.dtype) print("Action space: ", ac_space, ac_space.n) - a_size = int(ac_space.n) + a_size = pow(ac_space.nvec[0], ac_space.nvec.shape[0]) avg = [] num = [] @@ -89,11 +89,12 @@ for i in range(a_size): randval.append(np.random.normal(avg[i]/maxreward, 1/(pow(num[i],1) + 1), 1)) - #take index of highest value + # take index of highest value action = np.argmax(randval) #execute step - next_state, reward, done, _ = env.step(action) + actionVector = map_action + next_state, reward, done, _ = env.step(actionVector) #hysteresis if action != lastaction and abs(reward - lastreward) < 0.1: @@ -108,19 +109,12 @@ maxreward = np.maximum(maxreward, reward) # statistics - rewards.append(reward) - if args.output: with open(args.output, 'a') as csvFile: writer = csv.writer(csvFile) writer.writerow([reward, action, episode]) csvFile.close() - for ap in range(0, aps): - ifaceaction = int(action / (pow(numChannels, ap))) - ifaceaction = ifaceaction % numChannels - #actions[ap].append(ifaceaction) - print ("Reward: " + str(reward)) print ("GameOver: " + str(done)) print ("Next Channels: " + str(next_state)) @@ -133,8 +127,6 @@ plt.plot(run, reward, 'bo') # Additional point plt.ylabel('reward') plt.subplot(212) - #for ap in range(0, aps): - # 
plt.plot(actions[ap]) plt.plot(run, action, 'bo') # Additional point plt.ylabel('action') plt.xlabel('step') @@ -146,69 +138,3 @@ os._exit(1) episode += 1 - - -''' -ob_space = env.observation_space -ac_space = env.action_space -print("Observation space: ", ob_space, ob_space.dtype) -print("Action space: ", ac_space, ac_space.n) - -s_size = ob_space.shape[0] -a_size = ac_space.n -model = keras.Sequential() -model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) -model.add(keras.layers.Dense(a_size, activation='softmax')) -model.compile(optimizer=tf.train.AdamOptimizer(0.001), - loss='categorical_crossentropy', - metrics=['accuracy']) - -total_episodes = 200 -max_env_steps = 100 -env._max_episode_steps = max_env_steps - -epsilon = 1.0 # exploration rate -epsilon_min = 0.01 -epsilon_decay = 0.999 - -time_history = [] -rew_history = [] - -for e in range(total_episodes): - - state = env.reset() - state = np.reshape(state, [1, s_size]) - rewardsum = 0 - for time in range(max_env_steps): - # Choose action - if np.random.rand(1) < epsilon: - action = np.random.randint(a_size) - else: - action = np.argmax(model.predict(state)[0]) - - # Step - next_state, reward, done, _ = env.step(action) - - if done: - print("episode: {}/{}, time: {}, rew: {}, eps: {:.2}" - .format(e, total_episodes, time, rewardsum, epsilon)) - break - - next_state = np.reshape(next_state, [1, s_size]) - - # Train - target = reward - if not done: - target = (reward + 0.95 * np.amax(model.predict(next_state)[0])) - - target_f = model.predict(state) - target_f[0][action] = target - model.fit(state, target_f, epochs=1, verbose=0) - - state = next_state - rewardsum += reward - if epsilon > epsilon_min: epsilon *= epsilon_decay - - time_history.append(time) - rew_history.append(rewardsum) -''' From 27b7a0bdfba443d8ab5a8515262fb992a4d09eee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Mon, 29 Jul 2019 22:18:35 +0200 Subject: [PATCH 48/54] debug --- 
.../Setting2_2/config_slave.yaml | 6 +- .../Setting2_2/config_slave2.yaml | 6 +- .../Setting3_222/config_slave.yaml | 6 +- .../Setting3_222/config_slave2.yaml | 1 - openAI_RRM/config_master_simulation.yaml | 8 +- openAI_RRM/rrm_agent_multi.py | 8 +- openAI_RRM/thompson_agent.py | 10 +- openAI_RRM/thompson_agent_hysteresis.py | 140 ++++++++++++++++++ .../thompson_agent_hysteresis_schnell.py | 140 ++++++++++++++++++ 9 files changed, 302 insertions(+), 23 deletions(-) create mode 100644 openAI_RRM/thompson_agent_hysteresis.py create mode 100644 openAI_RRM/thompson_agent_hysteresis_schnell.py diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml index 02927a2..f41a604 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml index e0a1363..1d52f8b 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml index 8922509..b7a42ce 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml +++ 
b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml index 9994b38..2c58b54 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml @@ -32,7 +32,6 @@ modules: 'neighbors' : [['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03']], 'myMAC' : 'aa:aa:aa:aa:aa:02', 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, - , 'mode': 'singel', # training or working 'clientnum': 2 }} diff --git a/openAI_RRM/config_master_simulation.yaml b/openAI_RRM/config_master_simulation.yaml index d39683d..cad4e10 100644 --- a/openAI_RRM/config_master_simulation.yaml +++ b/openAI_RRM/config_master_simulation.yaml @@ -7,9 +7,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xpub: "tcp://127.0.0.1:8990" - xsub: "tcp://127.0.0.1:8989" +#broker: +# xpub: "tcp://127.0.0.1:8990" +# xsub: "tcp://127.0.0.1:8989" control_applications: myController: @@ -21,7 +21,7 @@ control_applications: 'steptime' : 1, 'simulation': True, 'mode': 'training', # training or working - 'scenarios': 180, + 'scenarios': 1, 'order': ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:02','aa:aa:aa:aa:aa:03', 'aa:aa:aa:aa:aa:04', 'aa:aa:aa:aa:aa:05','aa:aa:aa:aa:aa:06', 'aa:aa:aa:aa:aa:07','aa:aa:aa:aa:aa:08'] } diff --git a/openAI_RRM/rrm_agent_multi.py b/openAI_RRM/rrm_agent_multi.py index 84e6c94..218879d 100644 --- a/openAI_RRM/rrm_agent_multi.py +++ b/openAI_RRM/rrm_agent_multi.py @@ -20,10 +20,10 @@ from functools import reduce AVGTIME_ONEVALUE_RAND = 2 
-RANDVALUE_FIRST_EPISODE = 0.9 +RANDVALUE_FIRST_EPISODE = 0.7 REWARD_INIT = 0.00001 -SORT_VALUES = True -SCENARIOS = 180 +SORT_VALUES = False +SCENARIOS = 1 EPSILON_MAX_DECAY = 0.95 EPSILON_MIN = 0.01 ACTIVATE_OBSERVER = False @@ -180,7 +180,7 @@ def normalize_reward(reward, rewardpow, action): tmps_size = ob_space.shape s_size = tmps_size[0] * tmps_size[1] #s_size = list(map(lambda x: x * ob_space.high, s_size)) - a_size = pow(ac_space.nvec[0], ac_space.nvec.shape[0]) + a_size = int(pow(ac_space.nvec[0], ac_space.nvec.shape[0])) if a_size == 0: print("there is no vaild AP - sleep 2 seconds") diff --git a/openAI_RRM/thompson_agent.py b/openAI_RRM/thompson_agent.py index 0bfb710..2c6ce74 100644 --- a/openAI_RRM/thompson_agent.py +++ b/openAI_RRM/thompson_agent.py @@ -60,9 +60,9 @@ def map_action(mappedAction): ac_space = env.action_space ob_space = env.observation_space print("Observation space: ", ob_space, ob_space.dtype) - print("Action space: ", ac_space, ac_space.n) + print("Action space: ", ac_space, ac_space.nvec) - a_size = pow(ac_space.nvec[0], ac_space.nvec.shape[0]) + a_size = int(pow(ac_space.nvec[0], ac_space.nvec.shape[0])) avg = [] num = [] @@ -93,12 +93,12 @@ def map_action(mappedAction): action = np.argmax(randval) #execute step - actionVector = map_action + actionVector = map_action(action) next_state, reward, done, _ = env.step(actionVector) #hysteresis - if action != lastaction and abs(reward - lastreward) < 0.1: - reward = reward * 0.75 + #if action != lastaction and abs(reward - lastreward) < 0.1: + # reward = reward * 0.75 lastaction = action lastreward = reward diff --git a/openAI_RRM/thompson_agent_hysteresis.py b/openAI_RRM/thompson_agent_hysteresis.py new file mode 100644 index 0000000..a8341d5 --- /dev/null +++ b/openAI_RRM/thompson_agent_hysteresis.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import gym +import UniFlexGym +#import tensorflow as tf +#import tensorflow.contrib.slim as slim +import numpy as np 
+#from tensorflow import keras +import argparse +import logging +import time +import csv +import os +from math import * + + +parser = argparse.ArgumentParser(description='Uniflex reader') +parser.add_argument('--config', help='path to the uniflex config file', default=None) +parser.add_argument('--output', help='path to a csv file for agent output data', default=None) +parser.add_argument('--plot', help='activate plotting', default=None) +parser.add_argument('--steptime', help='interval between two steps', default=1) +parser.add_argument('--steps', help='number of steps in this execution. If not set, the agents runs infinitly long', default=None) + +args = parser.parse_args() +if not args.config: + print("No config file specified!") + os._exit(1) +if not args.output: + print("No output file specified! - Skip data") + +if args.plot: + import matplotlib.pyplot as plt + +ac_space = [] + +def map_action(mappedAction): + action = np.zeros(len(ac_space.nvec)) + for index in range(len(ac_space.nvec)): + # filter action by the index + ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) + ifaceaction = ifaceaction % ac_space.nvec[0] + action[index] = ifaceaction + return action + + +#create uniflex environment +env = gym.make('uniflex-v0') +#env.configure() +env.start_controller(steptime=float(args.steptime), config=args.config) + +numChannels = 2 +episode = 1 + +while True: + run = 0 + + state = env.reset() + n = 0 + ac_space = env.action_space + ob_space = env.observation_space + print("Observation space: ", ob_space, ob_space.dtype) + print("Action space: ", ac_space, ac_space.nvec) + + a_size = int(pow(ac_space.nvec[0], ac_space.nvec.shape[0])) + + avg = [] + num = [] + maxreward = 1 + lastreward = 0 + lastaction = 0 + + done = False + + if a_size == 0: + print("there is no vaild AP - sleep 10 seconds") + time.sleep(2) + continue + + aps = int(log(a_size, numChannels)) + + for i in range(a_size): + avg.append(0) + num.append(0) + + while not done: + # 
generate random values + randval = [] + for i in range(a_size): + randval.append(np.random.normal(avg[i]/maxreward, 1/(pow(num[i],1) + 1), 1)) + + # take index of highest value + action = np.argmax(randval) + + #execute step + actionVector = map_action(action) + next_state, reward, done, _ = env.step(actionVector) + + #hysteresis + if action != lastaction and abs(reward - lastreward) < 0.1: + reward = reward * 0.75 + lastaction = action + lastreward = reward + + # add reward for further execution + avg[action] = (avg[action] * num[action] + reward) / (num[action] + 2) + num[action] += 1 + + maxreward = np.maximum(maxreward, reward) + + # statistics + if args.output: + with open(args.output, 'a') as csvFile: + writer = csv.writer(csvFile) + writer.writerow([reward, action, episode]) + csvFile.close() + + print ("Reward: " + str(reward)) + print ("GameOver: " + str(done)) + print ("Next Channels: " + str(next_state)) + print ("Channel selection:" + str(action)) + print ("Average:" + str(avg)) + print ("next step") + + if args.plot: + plt.subplot(211) + plt.plot(run, reward, 'bo') # Additional point + plt.ylabel('reward') + plt.subplot(212) + plt.plot(run, action, 'bo') # Additional point + plt.ylabel('action') + plt.xlabel('step') + plt.pause(0.05) + + run += 1 + + if args.steps and int(args.steps) <= run: + os._exit(1) + + episode += 1 diff --git a/openAI_RRM/thompson_agent_hysteresis_schnell.py b/openAI_RRM/thompson_agent_hysteresis_schnell.py new file mode 100644 index 0000000..bf6350a --- /dev/null +++ b/openAI_RRM/thompson_agent_hysteresis_schnell.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import gym +import UniFlexGym +#import tensorflow as tf +#import tensorflow.contrib.slim as slim +import numpy as np +#from tensorflow import keras +import argparse +import logging +import time +import csv +import os +from math import * + + +parser = argparse.ArgumentParser(description='Uniflex reader') +parser.add_argument('--config', help='path 
to the uniflex config file', default=None) +parser.add_argument('--output', help='path to a csv file for agent output data', default=None) +parser.add_argument('--plot', help='activate plotting', default=None) +parser.add_argument('--steptime', help='interval between two steps', default=1) +parser.add_argument('--steps', help='number of steps in this execution. If not set, the agents runs infinitly long', default=None) + +args = parser.parse_args() +if not args.config: + print("No config file specified!") + os._exit(1) +if not args.output: + print("No output file specified! - Skip data") + +if args.plot: + import matplotlib.pyplot as plt + +ac_space = [] + +def map_action(mappedAction): + action = np.zeros(len(ac_space.nvec)) + for index in range(len(ac_space.nvec)): + # filter action by the index + ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) + ifaceaction = ifaceaction % ac_space.nvec[0] + action[index] = ifaceaction + return action + + +#create uniflex environment +env = gym.make('uniflex-v0') +#env.configure() +env.start_controller(steptime=float(args.steptime), config=args.config) + +numChannels = 2 +episode = 1 + +while True: + run = 0 + + state = env.reset() + n = 0 + ac_space = env.action_space + ob_space = env.observation_space + print("Observation space: ", ob_space, ob_space.dtype) + print("Action space: ", ac_space, ac_space.nvec) + + a_size = int(pow(ac_space.nvec[0], ac_space.nvec.shape[0])) + + avg = [] + num = [] + maxreward = 1 + lastreward = 0 + lastaction = 0 + + done = False + + if a_size == 0: + print("there is no vaild AP - sleep 10 seconds") + time.sleep(2) + continue + + aps = int(log(a_size, numChannels)) + + for i in range(a_size): + avg.append(0) + num.append(0) + + while not done: + # generate random values + randval = [] + for i in range(a_size): + randval.append(np.random.normal(avg[i]/maxreward, 1/(pow(num[i],2) + 1), 1)) + + # take index of highest value + action = np.argmax(randval) + + #execute step + 
actionVector = map_action(action) + next_state, reward, done, _ = env.step(actionVector) + + #hysteresis + if action != lastaction and abs(reward - lastreward) < 0.1: + reward = reward * 0.75 + lastaction = action + lastreward = reward + + # add reward for further execution + avg[action] = (avg[action] * num[action] + reward) / (num[action] + 2) + num[action] += 1 + + maxreward = np.maximum(maxreward, reward) + + # statistics + if args.output: + with open(args.output, 'a') as csvFile: + writer = csv.writer(csvFile) + writer.writerow([reward, action, episode]) + csvFile.close() + + print ("Reward: " + str(reward)) + print ("GameOver: " + str(done)) + print ("Next Channels: " + str(next_state)) + print ("Channel selection:" + str(action)) + print ("Average:" + str(avg)) + print ("next step") + + if args.plot: + plt.subplot(211) + plt.plot(run, reward, 'bo') # Additional point + plt.ylabel('reward') + plt.subplot(212) + plt.plot(run, action, 'bo') # Additional point + plt.ylabel('action') + plt.xlabel('step') + plt.pause(0.05) + + run += 1 + + if args.steps and int(args.steps) <= run: + os._exit(1) + + episode += 1 From f27d41779b7c7fb199cdbe2167636da483ae7903 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Mon, 29 Jul 2019 23:08:38 +0200 Subject: [PATCH 49/54] debug --- openAI_RRM/channel_controller.py | 2 +- openAI_RRM/rrm_agent_multi.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index 822c782..b5b6076 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -554,7 +554,7 @@ def get_observationSpace(self): the maximum is 10 ''' #maxValues = [len(self.availableChannels) for i in self._create_interface_list()] - return spaces.Box(low=0, high=6, shape=(len(self._create_interface_list()),2), dtype=numpy.uint32) + return spaces.Box(low=0, high=numpy.iinfo(numpy.uint32).max, shape=(len(self._create_interface_list()),2), 
dtype=numpy.uint32) #return spaces.MultiDiscrete(maxValues) #spaces.Box(low=0, high=10000000, shape=(len(self.observationSpace),), dtype=numpy.float32) diff --git a/openAI_RRM/rrm_agent_multi.py b/openAI_RRM/rrm_agent_multi.py index 218879d..4b66c41 100644 --- a/openAI_RRM/rrm_agent_multi.py +++ b/openAI_RRM/rrm_agent_multi.py @@ -49,7 +49,7 @@ def normalize_state(state, ob_space, s_size): state = np.reshape(state, [1, s_size]) # obspacehigh = np.reshape(ob_space.high, [1, s_size]) - state = state - 1 #*2 / obspacehigh - 1 + # state = state - 1 #*2 / obspacehigh - 1 return state @@ -63,7 +63,7 @@ def guess_steps(a_size): return stepidea def guess_epsilon_decay(steps, a_size): - func = lambda epsilon_decay: guess_random_numbers_in_firstEpisode(a_size) - (1-epsilon_decay**(steps + 1)) / (1 - epsilon_decay) + func = lambda epsilon_decay: guess_random_numbers_in_firstEpisode(a_size) - (1-epsilon_decay**(steps )) / (1 - epsilon_decay) return fsolve(func, 0.9999999999)[0] def map_action(mappedAction): From eb46883b4e481dfe0c4114f9cd7c9095d6edbede Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Tue, 30 Jul 2019 10:40:05 +0200 Subject: [PATCH 50/54] more random in neuronal network --- .../Setting2_2unsym/config_slave.yaml | 6 +++--- .../Setting2_2unsym/config_slave2.yaml | 6 +++--- .../SimulationSlavesConfig/Setting3_112/config_slave.yaml | 6 +++--- .../SimulationSlavesConfig/Setting3_112/config_slave2.yaml | 6 +++--- .../SimulationSlavesConfig/Setting3_112/config_slave3.yaml | 6 +++--- .../SimulationSlavesConfig/Setting3_222/config_slave2.yaml | 6 +++--- .../SimulationSlavesConfig/Setting3_222/config_slave3.yaml | 6 +++--- .../Setting8_22213122/config_slave.yaml | 6 +++--- .../Setting8_22213122/config_slave2.yaml | 6 +++--- .../Setting8_22213122/config_slave3.yaml | 6 +++--- .../Setting8_22213122/config_slave4.yaml | 6 +++--- .../Setting8_22213122/config_slave5.yaml | 6 +++--- .../Setting8_22213122/config_slave6.yaml | 6 +++--- 
.../Setting8_22213122/config_slave7.yaml | 6 +++--- .../Setting8_22213122/config_slave8.yaml | 6 +++--- openAI_RRM/rrm_agent_multi.py | 4 ++-- 16 files changed, 47 insertions(+), 47 deletions(-) diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml index 6bedbd9..d10eea6 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml index 1e3f80f..637709b 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml index febc7c5..56845c8 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git 
a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml index a06c215..cb5ae11 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml index 1b969d3..dce0357 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml index 2c58b54..8158de8 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml index db7eff0..1c03034 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml +++ 
b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave.yaml index a4f43f5..83a70b7 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave2.yaml index 6130370..2f16895 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave2.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave3.yaml index ea4a5ab..3993fde 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave3.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave3.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: 
"tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave4.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave4.yaml index b15a2ba..6b13e34 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave4.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave4.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave5.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave5.yaml index 8a49183..4670e55 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave5.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave5.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave6.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave6.yaml index 4258694..47127a6 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave6.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave6.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git 
a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave7.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave7.yaml index f29af52..b2d9670 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave7.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave7.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave8.yaml b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave8.yaml index 6d830b0..2aa4366 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave8.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/config_slave8.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" +#broker: +# xsub: "tcp://127.0.0.1:8990" +# xpub: "tcp://127.0.0.1:8989" control_applications: myFilter: diff --git a/openAI_RRM/rrm_agent_multi.py b/openAI_RRM/rrm_agent_multi.py index 4b66c41..90e646e 100644 --- a/openAI_RRM/rrm_agent_multi.py +++ b/openAI_RRM/rrm_agent_multi.py @@ -19,8 +19,8 @@ import datetime from functools import reduce -AVGTIME_ONEVALUE_RAND = 2 -RANDVALUE_FIRST_EPISODE = 0.7 +AVGTIME_ONEVALUE_RAND = 5 +RANDVALUE_FIRST_EPISODE = 0.9 REWARD_INIT = 0.00001 SORT_VALUES = False SCENARIOS = 1 From 41677da0b3c245e18f300291d2e9b670f0a6633d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Tue, 30 Jul 2019 11:37:28 +0200 Subject: [PATCH 51/54] debug with testbed --- openAI_RRM/channel_controller.py | 11 ++++++++--- openAI_RRM/rrm_agent_multi.py | 4 +++- openAI_RRM/thompson_agent.py | 4 +++- openAI_RRM/thompson_agent_hysteresis.py | 4 +++- 
openAI_RRM/thompson_agent_hysteresis_schnell.py | 4 +++- 5 files changed, 20 insertions(+), 7 deletions(-) diff --git a/openAI_RRM/channel_controller.py b/openAI_RRM/channel_controller.py index b5b6076..f56407b 100755 --- a/openAI_RRM/channel_controller.py +++ b/openAI_RRM/channel_controller.py @@ -233,8 +233,13 @@ def get_num_neighbours(self): for node in self.get_nodes(): for device in node.get_devices(): for interface in device.get_interfaces(): - infos = device.get_neighbours(interface) - neighbours.append(len(infos)) + try: + infos = device.get_neighbours(interface) + neighbours.append(len(infos)) + except AttributeError: + if self.scenarios > 1: + print("Device module does not support get_neighbours, but there are multiple scenarios!") + neighbours.append(1) return neighbours def get_bandwidth(self): @@ -570,7 +575,7 @@ def get_actionSpace(self): for key, interface in enumerate(interfaceList): self.log.info(str(key) + ":" + interface['device']) if len(interfaceList) == 0: - return spaces.MultiDiscrete([0]) + return spaces.MultiDiscrete([]) maxValues = [len(self.availableChannels) for i in self._create_interface_list()] return spaces.MultiDiscrete(maxValues) #([ 5, 2, 2 ])(pow(len(self.availableChannels), len(interfaceList))) diff --git a/openAI_RRM/rrm_agent_multi.py b/openAI_RRM/rrm_agent_multi.py index 90e646e..568ddf8 100644 --- a/openAI_RRM/rrm_agent_multi.py +++ b/openAI_RRM/rrm_agent_multi.py @@ -180,7 +180,9 @@ def normalize_reward(reward, rewardpow, action): tmps_size = ob_space.shape s_size = tmps_size[0] * tmps_size[1] #s_size = list(map(lambda x: x * ob_space.high, s_size)) - a_size = int(pow(ac_space.nvec[0], ac_space.nvec.shape[0])) + a_size = 0 + if len(ac_space.nvec) > 0: + a_size = int(pow(ac_space.nvec[0], ac_space.nvec.shape[0])) if a_size == 0: print("there is no vaild AP - sleep 2 seconds") diff --git a/openAI_RRM/thompson_agent.py b/openAI_RRM/thompson_agent.py index 2c6ce74..b449952 100644 --- a/openAI_RRM/thompson_agent.py +++ 
b/openAI_RRM/thompson_agent.py @@ -62,7 +62,9 @@ def map_action(mappedAction): print("Observation space: ", ob_space, ob_space.dtype) print("Action space: ", ac_space, ac_space.nvec) - a_size = int(pow(ac_space.nvec[0], ac_space.nvec.shape[0])) + a_size = 0 + if len(ac_space.nvec) > 0: + a_size = int(pow(ac_space.nvec[0], ac_space.nvec.shape[0])) avg = [] num = [] diff --git a/openAI_RRM/thompson_agent_hysteresis.py b/openAI_RRM/thompson_agent_hysteresis.py index a8341d5..0ecfdcd 100644 --- a/openAI_RRM/thompson_agent_hysteresis.py +++ b/openAI_RRM/thompson_agent_hysteresis.py @@ -62,7 +62,9 @@ def map_action(mappedAction): print("Observation space: ", ob_space, ob_space.dtype) print("Action space: ", ac_space, ac_space.nvec) - a_size = int(pow(ac_space.nvec[0], ac_space.nvec.shape[0])) + a_size = 0 + if len(ac_space.nvec) > 0: + a_size = int(pow(ac_space.nvec[0], ac_space.nvec.shape[0])) avg = [] num = [] diff --git a/openAI_RRM/thompson_agent_hysteresis_schnell.py b/openAI_RRM/thompson_agent_hysteresis_schnell.py index bf6350a..ce3d508 100644 --- a/openAI_RRM/thompson_agent_hysteresis_schnell.py +++ b/openAI_RRM/thompson_agent_hysteresis_schnell.py @@ -62,7 +62,9 @@ def map_action(mappedAction): print("Observation space: ", ob_space, ob_space.dtype) print("Action space: ", ac_space, ac_space.nvec) - a_size = int(pow(ac_space.nvec[0], ac_space.nvec.shape[0])) + a_size = 0 + if len(ac_space.nvec) > 0: + a_size = int(pow(ac_space.nvec[0], ac_space.nvec.shape[0])) avg = [] num = [] From 8e77edb944a43fe26b472c2e880427f50f504315 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Tue, 24 Sep 2019 17:02:13 +0200 Subject: [PATCH 52/54] prepare submission --- bachelor_controller/LICENSE | 9 - bachelor_controller/common.py | 28 -- bachelor_controller/config_local.yaml | 29 -- bachelor_controller/config_local_macosx.yaml | 33 -- bachelor_controller/config_master.yaml | 28 -- bachelor_controller/config_slave.yaml | 25 -- 
bachelor_controller/config_slave2.yaml | 25 -- bachelor_controller/my_control_app.py | 263 ---------------- bachelor_controller/my_filter.py | 53 ---- bachelor_controller/my_local_control_app.py | 152 --------- bachelor_controller/readme.txt | 11 - .../Setting2_2/config_slave.yaml | 11 +- .../Setting2_2/config_slave2.yaml | 11 +- .../Setting2_2/my_filter.py | 53 ---- .../Setting2_2/readme.txt | 8 +- .../Setting2_2unsym/config_slave.yaml | 7 +- .../Setting2_2unsym/config_slave2.yaml | 7 +- .../Setting2_2unsym/my_filter.py | 53 ---- .../Setting2_2unsym/readme.txt | 8 +- .../Setting3_112/config_slave.yaml | 9 - .../Setting3_112/config_slave2.yaml | 9 - .../Setting3_112/config_slave3.yaml | 9 - .../Setting3_112/my_filter.py | 53 ---- .../Setting3_112/readme.txt | 8 +- .../Setting3_222/config_slave.yaml | 11 +- .../Setting3_222/config_slave2.yaml | 11 +- .../Setting3_222/config_slave3.yaml | 11 +- .../Setting3_222/my_filter.py | 53 ---- .../Setting3_222/readme.txt | 8 +- .../Setting3_varSet/config_slave.yaml | 15 +- .../Setting3_varSet/config_slave2.yaml | 15 +- .../Setting3_varSet/config_slave3.yaml | 15 +- .../Setting3_varSet/my_filter.py | 53 ---- .../Setting3_varSet/readme.txt | 8 +- .../Setting3_varSetsmall/common.py | 28 -- .../Setting3_varSetsmall/config_slave.yaml | 38 --- .../Setting3_varSetsmall/config_slave2.yaml | 38 --- .../Setting3_varSetsmall/config_slave3.yaml | 38 --- .../Setting3_varSetsmall/my_filter.py | 53 ---- .../Setting3_varSetsmall/readme.txt | 15 - .../Setting8_22213122/readme.txt | 13 +- openAI_RRM/config_slave.yaml | 6 +- openAI_RRM/readme.txt | 7 +- openAI_RRM/{rrm_agent.py => rl_agent.py} | 0 .../{rrm_agent_multi.py => rl_agent_multi.py} | 8 +- ...multi.py => rl_agent_multi_evalnetwork.py} | 77 +---- .../rrm_agent_evalmodel1_cliap-apcli.py | 264 ---------------- openAI_RRM/rrm_agent_evalmodel2_sameset112.py | 279 ----------------- ...rrm_agent_evalmodel3_difset_sort_cli-ap.py | 284 ----------------- 
.../rrm_agent_evalmodel4_difset_unsorted.py | 287 ----------------- ...valmodel5_difset_sort_cli-ap_detecttopo.py | 290 ------------------ openAI_RRM/thompson_agent.py | 4 +- ...steresis_schnell.py => thompson_agent2.py} | 0 openAI_RRM/thompson_agent_hysteresis.py | 142 --------- 54 files changed, 67 insertions(+), 2906 deletions(-) delete mode 100644 bachelor_controller/LICENSE delete mode 100755 bachelor_controller/common.py delete mode 100644 bachelor_controller/config_local.yaml delete mode 100644 bachelor_controller/config_local_macosx.yaml delete mode 100644 bachelor_controller/config_master.yaml delete mode 100644 bachelor_controller/config_slave.yaml delete mode 100644 bachelor_controller/config_slave2.yaml delete mode 100755 bachelor_controller/my_control_app.py delete mode 100755 bachelor_controller/my_filter.py delete mode 100755 bachelor_controller/my_local_control_app.py delete mode 100644 bachelor_controller/readme.txt delete mode 100755 openAI_RRM/SimulationSlavesConfig/Setting2_2/my_filter.py delete mode 100755 openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/my_filter.py delete mode 100755 openAI_RRM/SimulationSlavesConfig/Setting3_112/my_filter.py delete mode 100755 openAI_RRM/SimulationSlavesConfig/Setting3_222/my_filter.py delete mode 100755 openAI_RRM/SimulationSlavesConfig/Setting3_varSet/my_filter.py delete mode 100755 openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/common.py delete mode 100644 openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave.yaml delete mode 100644 openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave2.yaml delete mode 100644 openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave3.yaml delete mode 100755 openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/my_filter.py delete mode 100644 openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/readme.txt rename openAI_RRM/{rrm_agent.py => rl_agent.py} (100%) rename openAI_RRM/{rrm_agent_multi.py => rl_agent_multi.py} 
(98%) rename openAI_RRM/{rrm_agent_evalmodel6_multi.py => rl_agent_multi_evalnetwork.py} (71%) delete mode 100644 openAI_RRM/rrm_agent_evalmodel1_cliap-apcli.py delete mode 100644 openAI_RRM/rrm_agent_evalmodel2_sameset112.py delete mode 100644 openAI_RRM/rrm_agent_evalmodel3_difset_sort_cli-ap.py delete mode 100644 openAI_RRM/rrm_agent_evalmodel4_difset_unsorted.py delete mode 100644 openAI_RRM/rrm_agent_evalmodel5_difset_sort_cli-ap_detecttopo.py rename openAI_RRM/{thompson_agent_hysteresis_schnell.py => thompson_agent2.py} (100%) delete mode 100644 openAI_RRM/thompson_agent_hysteresis.py diff --git a/bachelor_controller/LICENSE b/bachelor_controller/LICENSE deleted file mode 100644 index 8ac59fd..0000000 --- a/bachelor_controller/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Technische Universität Berlin - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/bachelor_controller/common.py b/bachelor_controller/common.py deleted file mode 100755 index 370d25f..0000000 --- a/bachelor_controller/common.py +++ /dev/null @@ -1,28 +0,0 @@ -from uniflex.core import events - -__author__ = "Piotr Gawlowicz" -__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" -__version__ = "0.1.0" -__email__ = "{gawlowicz}@tkn.tu-berlin.de" - - -class AveragedSpectrumScanSampleEvent(events.EventBase): - def __init__(self, avg): - super().__init__() - self.avg = avg - - -class StartMyFilterEvent(events.EventBase): - def __init__(self): - super().__init__() - - -class StopMyFilterEvent(events.EventBase): - def __init__(self): - super().__init__() - - -class ChangeWindowSizeEvent(events.EventBase): - def __init__(self, value): - super().__init__() - self.window = value diff --git a/bachelor_controller/config_local.yaml b/bachelor_controller/config_local.yaml deleted file mode 100644 index 92999a8..0000000 --- a/bachelor_controller/config_local.yaml +++ /dev/null @@ -1,29 +0,0 @@ -## UniFlex Agent config file - -config: - name: 'Local_Controller' - info: 'CPs and modules on single node' - iface: 'lo' - -control_applications: - myController: - file : my_local_control_app.py - class_name : MyController - kwargs : {} - - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - - -modules: - discovery: - module : uniflex_app_discovery_pyre - class_name : PyreDiscoverySlaveModule - kwargs: {"iface":"lo", "groupName":"uniflex_1234"} - - simple: - module : uniflex_module_simple - class_name : SimpleModule2 - devices : ['phy0'] diff --git a/bachelor_controller/config_local_macosx.yaml b/bachelor_controller/config_local_macosx.yaml deleted file mode 100644 index d2abb09..0000000 --- a/bachelor_controller/config_local_macosx.yaml +++ /dev/null @@ -1,33 +0,0 @@ -## UniFlex Agent config file - -config: - name: 'Local_Controller' - info: 'CPs and modules on single node' - iface: 'en0' - 
-control_applications: - myController: - file : my_local_control_app.py - class_name : MyController - kwargs : {} - - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - - -modules: - discovery: - module : uniflex_app_discovery_pyre - class_name : PyreDiscoverySlaveModule - kwargs: {"iface":"lo", "groupName":"uniflex_1234"} - - simple: - module : uniflex_module_simple - class_name : SimpleModule2 - devices : ['phy0'] - - iperf: - module : uniflex_module_iperf - class_name : IperfModule diff --git a/bachelor_controller/config_master.yaml b/bachelor_controller/config_master.yaml deleted file mode 100644 index a4d507b..0000000 --- a/bachelor_controller/config_master.yaml +++ /dev/null @@ -1,28 +0,0 @@ -## UniFlex Agent config file - -config: - name: "Global_Controller" - info: 'agent hosts global controller' - iface: 'lo' - sub: "tcp://127.0.0.1:8990" - pub: "tcp://127.0.0.1:8989" - -broker: - xpub: "tcp://127.0.0.1:8990" - xsub: "tcp://127.0.0.1:8989" - -control_applications: - myController: - file : my_control_app.py - class_name : MyController - kwargs : {} - -modules: - discovery: - module : uniflex_app_discovery_pyre - class_name : PyreDiscoveryMasterModule - kwargs: {"iface":"lo", - "groupName":"uniflex_1234", - "sub":"tcp://127.0.0.1:8990", - "pub":"tcp://127.0.0.1:8989" - } diff --git a/bachelor_controller/config_slave.yaml b/bachelor_controller/config_slave.yaml deleted file mode 100644 index 4ce221e..0000000 --- a/bachelor_controller/config_slave.yaml +++ /dev/null @@ -1,25 +0,0 @@ -## UniFlex Agent config file - -config: - name: 'HC node' - info: 'filter runs on local node' - iface: 'lo' - -control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - - discovery: - module : uniflex_app_discovery_pyre - class_name : PyreDiscoverySlaveModule - kwargs: {"iface":"lo", "groupName":"uniflex_1234"} - -modules: - simple: - module : uniflex_module_simple - class_name : SimpleModule4 - 
devices : ['phy0'] - kwargs : { "MAC_List" : ["38:10:d5:d7:54:82", "38:10:d5:d7:59:23"]} - diff --git a/bachelor_controller/config_slave2.yaml b/bachelor_controller/config_slave2.yaml deleted file mode 100644 index 21c58dc..0000000 --- a/bachelor_controller/config_slave2.yaml +++ /dev/null @@ -1,25 +0,0 @@ -## UniFlex Agent config file - -config: - name: 'HC node' - info: 'filter runs on local node' - iface: 'lo' - -control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - - discovery: - module : uniflex_app_discovery_pyre - class_name : PyreDiscoverySlaveModule - kwargs: {"iface":"lo", "groupName":"uniflex_1234"} - -modules: - simple: - module : uniflex_module_simple - class_name : SimpleModule4 - devices : ['phy0'] - kwargs : { "MAC_List" : ["38:10:d5:9a:0b:60"]} - diff --git a/bachelor_controller/my_control_app.py b/bachelor_controller/my_control_app.py deleted file mode 100755 index d45a6f6..0000000 --- a/bachelor_controller/my_control_app.py +++ /dev/null @@ -1,263 +0,0 @@ -import logging -import datetime -import random - -from sbi.radio_device.events import PacketLossEvent -from uniflex.core import modules -from uniflex.core import events -from uniflex.core.timer import TimerEventSender -from common import AveragedSpectrumScanSampleEvent -from common import ChangeWindowSizeEvent - -__author__ = "Piotr Gawlowicz, Sascha Rösler" -__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" -__version__ = "0.1.0" -__email__ = "{gawlowicz}@tkn.tu-berlin.de, s.resler@campus.tu-berlin.de" - - -class PeriodicEvaluationTimeEvent(events.TimeEvent): - def __init__(self): - super().__init__() - - -class MyController(modules.ControlApplication): - def __init__(self): - super(MyController, self).__init__() - self.log = logging.getLogger('MyController') - self.running = False - - self.timeInterval = 10 - self.timer = TimerEventSender(self, PeriodicEvaluationTimeEvent) - self.timer.start(self.timeInterval) - - 
self.packetLossEventsEnabled = False - self.channel = 1 - - @modules.on_start() - def my_start_function(self): - print("start control app") - self.running = True - - @modules.on_exit() - def my_stop_function(self): - print("stop control app") - self.running = False - - @modules.on_event(events.NewNodeEvent) - def add_node(self, event): - node = event.node - - self.log.info("Added new node: {}, Local: {}" - .format(node.uuid, node.local)) - self._add_node(node) - - for dev in node.get_devices(): - print("Dev: ", dev.name) - print(dev) - - for m in node.get_modules(): - print("Module: ", m.name) - print(m) - - for app in node.get_control_applications(): - print("App: ", app.name) - print(app) - - device = node.get_device(0) - device.set_tx_power(15, "wlan0") - device.set_channel(random.randint(1, 11), "wlan0") - #device.packet_loss_monitor_start() - #device.spectral_scan_start() - # device.play_waveform() - # TODO: is_implemented() - - @modules.on_event(events.NodeExitEvent) - @modules.on_event(events.NodeLostEvent) - def remove_node(self, event): - self.log.info("Node lost".format()) - node = event.node - reason = event.reason - if self._remove_node(node): - self.log.info("Node: {}, Local: {} removed reason: {}" - .format(node.uuid, node.local, reason)) - - @modules.on_event(PacketLossEvent) - def serve_packet_loss_event(self, event): - node = event.node - device = event.device - self.log.info("Packet loss in node {}, dev: {}" - .format(node.hostname, device.name)) - - @modules.on_event(AveragedSpectrumScanSampleEvent) - def serve_spectral_scan_sample(self, event): - avgSample = event.avg - self.log.info("Averaged Spectral Scan Sample: {}" - .format(avgSample)) - - def default_cb(self, data): - node = data.node - devName = None - if data.device: - devName = data.device.name - msg = data.msg - print("Default Callback: " - "Node: {}, Dev: {}, Data: {}" - .format(node.hostname, devName, msg)) - - def get_power_cb(self, data): - node = data.node - msg = data.msg - dev = 
node.get_device(0) - print("Power in " - "Node: {}, Dev: {}, was set to: {}" - .format(node.hostname, dev.name, msg)) - - newPwr = random.randint(1, 20) - dev.blocking(False).set_tx_power(newPwr, "wlan0") - print("Power in " - "Node: {}, Dev: {}, was set to: {}" - .format(node.hostname, dev.name, newPwr)) - - def scheduled_get_channel_cb(self, data): - node = data.node - msg = data.msg - dev = node.get_device(0) - print("Scheduled get_channel; Power in " - "Node: {}, Dev: {}, was set to: {}" - .format(node.hostname, dev.name, msg)) - - @modules.on_event(PeriodicEvaluationTimeEvent) - def periodic_evaluation(self, event): - # go over collected samples, etc.... - # make some decisions, etc... - print("Periodic Evaluation") - print("My nodes: ", [node.hostname for node in self.get_nodes()]) - self.timer.start(self.timeInterval) - - if len(self.get_nodes()) == 0: - return - - flows = [] - for node in self.get_nodes(): - for device in node.get_devices(): - device.spectral_scan_stop() - chnum = device.get_channel("wlan0") - chw = device.get_channel_width("wlan0") - infos = device.get_info_of_connected_devices("wlan0") - - for mac in infos: - flows.append({'mac address' : mac, 'channel number' : chnum, 'channel width' : chw}) - - for node in self.get_nodes(): - print ("work " + node.hostname) - for device in node.get_devices(): - - if type(device.my_control_flow) is not list: - device.my_control_flow = [] - - for flow in device.my_control_flow: - flow['old'] = True - - device.set_packet_counter(flows, "wlan0") - chnum = device.get_channel("wlan0") - chw = device.get_channel_width("wlan0") - infos = device.get_info_of_connected_devices("wlan0") - - bandwidth = {} - - for mac in infos: - values = infos[mac] - newTxBytes = int(values['tx bytes'][0]) - - flow = [d for d in device.my_control_flow if d['mac address'] == mac] - if len(flow) > 0: - flow = flow[0] - dif = datetime.datetime.now() - flow['last update'] - bandwidth[mac] = (newTxBytes - flow['tx bytes'] ) / 
(dif.total_seconds() + dif.microseconds / 1000000.0) - flow['tx bytes'] = newTxBytes - flow['last update'] = datetime.datetime.now() - flow['old'] = False - else : - device.my_control_flow.append({'mac address' : mac, 'tx bytes' : newTxBytes, 'last update' : datetime.datetime.now(), 'old' : False}) - - for flow in device.my_control_flow: - if flow['old']: - device.my_control_flow.remove(flow) - - print ("device " + device.name + " operates on channel " + str(chnum) + " with a bandwidth of " + chw + " - change to channel " + str(self.channel)) - print(bandwidth) - - device.blocking(False).set_channel(self.channel, "wlan0") - - self.channel += 1 - if self.channel > 13: - self.channel = 1 - ''' - node = self.get_node(0) - device = node.get_device(0) - - if device.is_packet_loss_monitor_running(): - device.packet_loss_monitor_stop() - device.spectral_scan_stop() - else: - device.packet_loss_monitor_start() - device.spectral_scan_start() - - avgFilterApp = None - for app in node.get_control_applications(): - if app.name == "MyAvgFilter": - avgFilterApp = app - break - - if avgFilterApp.is_running(): - myValue = random.randint(1, 20) - [nValue1, nValue2] = avgFilterApp.blocking(True).add_two(myValue) - print("My value: {} + 2 = {}".format(myValue, nValue1)) - print("My value: {} * 2 = {}".format(myValue, nValue2)) - avgFilterApp.stop() - - newWindow = random.randint(10, 50) - old = avgFilterApp.blocking(True).get_window_size() - print("Old Window Size : {}".format(old)) - avgFilterApp.blocking(True).change_window_size_func(newWindow) - nValue = avgFilterApp.blocking(True).get_window_size() - print("New Window Size : {}".format(nValue)) - - else: - avgFilterApp.start() - newWindow = random.randint(10, 50) - event = ChangeWindowSizeEvent(newWindow) - avgFilterApp.send_event(event) - - # execute non-blocking function immediately - device.blocking(False).set_tx_power(random.randint(1, 20), "wlan0") - - # execute non-blocking function immediately, with specific callback - 
device.callback(self.get_power_cb).get_tx_power("wlan0") - - # schedule non-blocking function delay - device.delay(3).callback(self.default_cb).get_tx_power("wlan0") - - # schedule non-blocking function exec time - exec_time = datetime.datetime.now() + datetime.timedelta(seconds=3) - newChannel = random.randint(1, 11) - device.exec_time(exec_time).set_channel(newChannel, "wlan0") - - # schedule execution of function multiple times - start_date = datetime.datetime.now() + datetime.timedelta(seconds=2) - interval = datetime.timedelta(seconds=1) - repetitionNum = 3 - device.exec_time(start_date, interval, repetitionNum).callback(self.scheduled_get_channel_cb).get_channel("wlan0") - - # execute blocking function immediately - result = device.get_channel("wlan0") - print("{} Channel is: {}".format(datetime.datetime.now(), result)) - - # exception handling, clean_per_flow_tx_power_table implementation - # raises exception - try: - device.clean_per_flow_tx_power_table("wlan0") - except Exception as e: - print("{} !!!Exception!!!: {}".format( - datetime.datetime.now(), e)) - ''' diff --git a/bachelor_controller/my_filter.py b/bachelor_controller/my_filter.py deleted file mode 100755 index 59d59a2..0000000 --- a/bachelor_controller/my_filter.py +++ /dev/null @@ -1,53 +0,0 @@ -import logging -from uniflex.core import modules -from sbi.radio_device.events import SpectralScanSampleEvent -from common import AveragedSpectrumScanSampleEvent -from common import ChangeWindowSizeEvent - -__author__ = "Piotr Gawlowicz" -__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" -__version__ = "0.1.0" -__email__ = "{gawlowicz}@tkn.tu-berlin.de" - - -class MyAvgFilter(modules.ControlApplication): - def __init__(self, window): - super(MyAvgFilter, self).__init__() - self.log = logging.getLogger('MyFilter') - self.window = window - self.samples = [] - - @modules.on_event(ChangeWindowSizeEvent) - def change_window_size(self, event): - self.log.info("New window size: 
{}".format(event.window)) - self.window = event.window - - def change_window_size_func(self, newWindow): - self.log.info("New window size: {}".format(newWindow)) - self.window = newWindow - - def get_window_size(self): - return self.window - - @modules.on_event(SpectralScanSampleEvent) - def serve_spectral_scan_sample(self, event): - sample = event.sample - node = event.node - device = event.device - self.log.debug("New SpectralScan Sample:{} from node {}, device: {}" - .format(sample, node, device)) - - self.samples.append(sample) - - if len(self.samples) == self.window: - s = sum(self.samples) - self.samples.pop(0) - avg = s / self.window - self.log.debug("Calculated average: {}".format(avg)) - event = AveragedSpectrumScanSampleEvent(avg) - self.send_event(event) - - def add_two(self, value): - value1 = value + 2 - value2 = value * 2 - return [value1, value2] diff --git a/bachelor_controller/my_local_control_app.py b/bachelor_controller/my_local_control_app.py deleted file mode 100755 index 4b0db07..0000000 --- a/bachelor_controller/my_local_control_app.py +++ /dev/null @@ -1,152 +0,0 @@ -import logging -import datetime -import random -from sbi.radio_device.events import PacketLossEvent -from uniflex.core import modules -from uniflex.core import events -from uniflex.core.timer import TimerEventSender -from common import AveragedSpectrumScanSampleEvent -from common import StartMyFilterEvent -from common import StopMyFilterEvent - -__author__ = "Piotr Gawlowicz" -__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" -__version__ = "0.1.0" -__email__ = "{gawlowicz}@tkn.tu-berlin.de" - - -class PeriodicEvaluationTimeEvent(events.TimeEvent): - def __init__(self): - super().__init__() - - -class MyController(modules.ControlApplication): - def __init__(self): - super(MyController, self).__init__() - self.log = logging.getLogger('MyController') - self.running = False - self.nodes = [] - - self.timeInterval = 10 - self.timer = TimerEventSender(self, 
PeriodicEvaluationTimeEvent) - self.timer.start(self.timeInterval) - - self.myFilterRunning = False - self.packetLossEventsEnabled = False - - @modules.on_start() - def my_start_function(self): - print("start control app") - self.running = True - - node = self.localNode - self.log.info("My local, Local: {}" - .format(node.local)) - - for dev in node.get_devices(): - print("Dev: ", dev.name) - print(dev) - - for m in node.get_modules(): - print("Module: ", m.name) - print(m) - - for app in node.get_control_applications(): - print("App: ", app.name) - print(app) - - device = node.get_device(0) - device.set_tx_power(15, 'ath0') - device.set_channel(random.randint(1, 11), 'ath0') - device.packet_loss_monitor_start() - self.packetLossEventsEnabled = True - device.spectral_scan_start() - - @modules.on_exit() - def my_stop_function(self): - print("stop control app") - self.running = False - - @modules.on_event(PacketLossEvent) - def serve_packet_loss_event(self, event): - node = event.node - device = event.device - self.log.info("Packet loss, dev: {}" - .format(device)) - - @modules.on_event(AveragedSpectrumScanSampleEvent) - def serve_spectral_scan_sample(self, event): - avgSample = event.avg - self.log.info("Averaged Spectral Scan Sample: {}" - .format(avgSample)) - - def default_cb(self, data): - node = data.node - devName = None - if data.device: - devName = data.device.name - msg = data.msg - print("Default Callback: " - "Dev: {}, Data: {}" - .format(devName, msg)) - - def get_power_cb(self, data): - node = data.node - dev = data.device - msg = data.msg - print("Power in " - "Dev: {}, was set to: {}" - .format(dev.name, msg)) - - @modules.on_event(PeriodicEvaluationTimeEvent) - def periodic_evaluation(self, event): - # go over collected samples, etc.... - # make some decisions, etc... 
- print("Periodic Evaluation") - - node = self.localNode - device = node.get_device(0) - - self.log.info("My local node, Local: {}" - .format(node.local)) - self.timer.start(self.timeInterval) - - if self.packetLossEventsEnabled: - device.packet_loss_monitor_stop() - self.packetLossEventsEnabled = False - else: - device.packet_loss_monitor_start() - self.packetLossEventsEnabled = True - - if self.myFilterRunning: - self.send_event(StopMyFilterEvent()) - self.myFilterRunning = False - else: - self.send_event(StartMyFilterEvent()) - self.myFilterRunning = True - - # execute non-blocking function immediately - device.blocking(False).set_tx_power(random.randint(1, 20), 'ath0') - - # execute non-blocking function immediately, with specific callback - device.callback(self.get_power_cb).get_tx_power('ath0') - - # schedule non-blocking function delay - device.delay(3).callback(self.default_cb).get_tx_power("wlan0") - - # schedule non-blocking function exec time - exec_time = datetime.datetime.now() + datetime.timedelta(seconds=3) - newChannel = random.randint(1, 11) - device.exec_time(exec_time).set_channel(newChannel, 'ath0') - - # execute blocking function immediately - result = device.get_channel('ath0') - print("{} Channel is: {}".format(datetime.datetime.now(), result)) - - # exception handling, clean_per_flow_tx_power_table implementation - # raises exception - try: - device.clean_per_flow_tx_power_table("wlan0") - except Exception as e: - print("{} !!!Exception!!!: {}".format( - datetime.datetime.now(), e)) diff --git a/bachelor_controller/readme.txt b/bachelor_controller/readme.txt deleted file mode 100644 index cbbfb5a..0000000 --- a/bachelor_controller/readme.txt +++ /dev/null @@ -1,11 +0,0 @@ -# 1. Run control program and all modules on local node -uniflex-agent --config ./config_local.yaml - -# 2a. Run control program in master node: -uniflex-broker -# 2b. Run control program in master node: -uniflex-agent --config ./config_master.yaml -# 2c. 
Run modules in slave node: -uniflex-agent --config ./config_slave.yaml - -# For debugging mode run with -v option diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml index f41a604..230e9bd 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave.yaml @@ -2,22 +2,13 @@ config: name: 'HC node' - info: 'filter runs on local node' + info: 'rrm control simulation' iface: 'lo' iface: 'lo' sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -#broker: -# xsub: "tcp://127.0.0.1:8990" -# xpub: "tcp://127.0.0.1:8989" - control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - discovery: module : uniflex_app_discovery_pyre class_name : PyreDiscoverySlaveModule diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml index 1d52f8b..79d760b 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2/config_slave2.yaml @@ -2,22 +2,13 @@ config: name: 'HC node' - info: 'filter runs on local node' + info: 'rrm control simulation' iface: 'lo' iface: 'lo' sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -#broker: -# xsub: "tcp://127.0.0.1:8990" -# xpub: "tcp://127.0.0.1:8989" - control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - discovery: module : uniflex_app_discovery_pyre class_name : PyreDiscoverySlaveModule diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2/my_filter.py b/openAI_RRM/SimulationSlavesConfig/Setting2_2/my_filter.py deleted file mode 100755 index 59d59a2..0000000 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2/my_filter.py +++ /dev/null @@ -1,53 +0,0 @@ -import logging -from uniflex.core import modules -from 
sbi.radio_device.events import SpectralScanSampleEvent -from common import AveragedSpectrumScanSampleEvent -from common import ChangeWindowSizeEvent - -__author__ = "Piotr Gawlowicz" -__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" -__version__ = "0.1.0" -__email__ = "{gawlowicz}@tkn.tu-berlin.de" - - -class MyAvgFilter(modules.ControlApplication): - def __init__(self, window): - super(MyAvgFilter, self).__init__() - self.log = logging.getLogger('MyFilter') - self.window = window - self.samples = [] - - @modules.on_event(ChangeWindowSizeEvent) - def change_window_size(self, event): - self.log.info("New window size: {}".format(event.window)) - self.window = event.window - - def change_window_size_func(self, newWindow): - self.log.info("New window size: {}".format(newWindow)) - self.window = newWindow - - def get_window_size(self): - return self.window - - @modules.on_event(SpectralScanSampleEvent) - def serve_spectral_scan_sample(self, event): - sample = event.sample - node = event.node - device = event.device - self.log.debug("New SpectralScan Sample:{} from node {}, device: {}" - .format(sample, node, device)) - - self.samples.append(sample) - - if len(self.samples) == self.window: - s = sum(self.samples) - self.samples.pop(0) - avg = s / self.window - self.log.debug("Calculated average: {}".format(avg)) - event = AveragedSpectrumScanSampleEvent(avg) - self.send_event(event) - - def add_two(self, value): - value1 = value + 2 - value2 = value * 2 - return [value1, value2] diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2/readme.txt b/openAI_RRM/SimulationSlavesConfig/Setting2_2/readme.txt index a99904a..bc913c7 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2/readme.txt +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2/readme.txt @@ -1,13 +1,13 @@ -# 1. 
Run control program and all modules on local node -uniflex-agent --config ./config_local.yaml - +# Start environment if Uniflex is installed in some source ~/Uniflex/dev/bin/activate # 2a. Run control program in master node: uniflex-broker # 2b. Run control program in master node: -python3 rrm_agent.py --config ./config_master.yaml +python3 rrm_agent.py --config ./config_master_simulation.yaml + # 2c. Run modules in slave node: +#Simulation uniflex-agent --config ./config_slave.yaml uniflex-agent --config ./config_slave2.yaml diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml index d10eea6..a196962 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave.yaml @@ -2,7 +2,7 @@ config: name: 'HC node' - info: 'filter runs on local node' + info: 'rrm control simulation' iface: 'lo' iface: 'lo' sub: "tcp://127.0.0.1:8990" @@ -13,11 +13,6 @@ config: # xpub: "tcp://127.0.0.1:8989" control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - discovery: module : uniflex_app_discovery_pyre class_name : PyreDiscoverySlaveModule diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml index 637709b..89d5374 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/config_slave2.yaml @@ -2,7 +2,7 @@ config: name: 'HC node' - info: 'filter runs on local node' + info: 'rrm control simulation' iface: 'lo' iface: 'lo' sub: "tcp://127.0.0.1:8990" @@ -13,11 +13,6 @@ config: # xpub: "tcp://127.0.0.1:8989" control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - discovery: module : uniflex_app_discovery_pyre class_name : 
PyreDiscoverySlaveModule diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/my_filter.py b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/my_filter.py deleted file mode 100755 index 59d59a2..0000000 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/my_filter.py +++ /dev/null @@ -1,53 +0,0 @@ -import logging -from uniflex.core import modules -from sbi.radio_device.events import SpectralScanSampleEvent -from common import AveragedSpectrumScanSampleEvent -from common import ChangeWindowSizeEvent - -__author__ = "Piotr Gawlowicz" -__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" -__version__ = "0.1.0" -__email__ = "{gawlowicz}@tkn.tu-berlin.de" - - -class MyAvgFilter(modules.ControlApplication): - def __init__(self, window): - super(MyAvgFilter, self).__init__() - self.log = logging.getLogger('MyFilter') - self.window = window - self.samples = [] - - @modules.on_event(ChangeWindowSizeEvent) - def change_window_size(self, event): - self.log.info("New window size: {}".format(event.window)) - self.window = event.window - - def change_window_size_func(self, newWindow): - self.log.info("New window size: {}".format(newWindow)) - self.window = newWindow - - def get_window_size(self): - return self.window - - @modules.on_event(SpectralScanSampleEvent) - def serve_spectral_scan_sample(self, event): - sample = event.sample - node = event.node - device = event.device - self.log.debug("New SpectralScan Sample:{} from node {}, device: {}" - .format(sample, node, device)) - - self.samples.append(sample) - - if len(self.samples) == self.window: - s = sum(self.samples) - self.samples.pop(0) - avg = s / self.window - self.log.debug("Calculated average: {}".format(avg)) - event = AveragedSpectrumScanSampleEvent(avg) - self.send_event(event) - - def add_two(self, value): - value1 = value + 2 - value2 = value * 2 - return [value1, value2] diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/readme.txt 
b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/readme.txt index a99904a..bc913c7 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/readme.txt +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/readme.txt @@ -1,13 +1,13 @@ -# 1. Run control program and all modules on local node -uniflex-agent --config ./config_local.yaml - +# Start environment if Uniflex is installed in some source ~/Uniflex/dev/bin/activate # 2a. Run control program in master node: uniflex-broker # 2b. Run control program in master node: -python3 rrm_agent.py --config ./config_master.yaml +python3 rrm_agent.py --config ./config_master_simulation.yaml + # 2c. Run modules in slave node: +#Simulation uniflex-agent --config ./config_slave.yaml uniflex-agent --config ./config_slave2.yaml diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml index 56845c8..613455d 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave.yaml @@ -8,16 +8,7 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -#broker: -# xsub: "tcp://127.0.0.1:8990" -# xpub: "tcp://127.0.0.1:8989" - control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - discovery: module : uniflex_app_discovery_pyre class_name : PyreDiscoverySlaveModule diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml index cb5ae11..1631d92 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave2.yaml @@ -8,16 +8,7 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -#broker: -# xsub: "tcp://127.0.0.1:8990" -# xpub: "tcp://127.0.0.1:8989" - control_applications: - myFilter: - file : my_filter.py - class_name 
: MyAvgFilter - kwargs : {"window": 5} - discovery: module : uniflex_app_discovery_pyre class_name : PyreDiscoverySlaveModule diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml index dce0357..0f3ecce 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/config_slave3.yaml @@ -8,16 +8,7 @@ config: sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -#broker: -# xsub: "tcp://127.0.0.1:8990" -# xpub: "tcp://127.0.0.1:8989" - control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - discovery: module : uniflex_app_discovery_pyre class_name : PyreDiscoverySlaveModule diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/my_filter.py b/openAI_RRM/SimulationSlavesConfig/Setting3_112/my_filter.py deleted file mode 100755 index 59d59a2..0000000 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/my_filter.py +++ /dev/null @@ -1,53 +0,0 @@ -import logging -from uniflex.core import modules -from sbi.radio_device.events import SpectralScanSampleEvent -from common import AveragedSpectrumScanSampleEvent -from common import ChangeWindowSizeEvent - -__author__ = "Piotr Gawlowicz" -__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" -__version__ = "0.1.0" -__email__ = "{gawlowicz}@tkn.tu-berlin.de" - - -class MyAvgFilter(modules.ControlApplication): - def __init__(self, window): - super(MyAvgFilter, self).__init__() - self.log = logging.getLogger('MyFilter') - self.window = window - self.samples = [] - - @modules.on_event(ChangeWindowSizeEvent) - def change_window_size(self, event): - self.log.info("New window size: {}".format(event.window)) - self.window = event.window - - def change_window_size_func(self, newWindow): - self.log.info("New window size: {}".format(newWindow)) - self.window = newWindow - - def get_window_size(self): - 
return self.window - - @modules.on_event(SpectralScanSampleEvent) - def serve_spectral_scan_sample(self, event): - sample = event.sample - node = event.node - device = event.device - self.log.debug("New SpectralScan Sample:{} from node {}, device: {}" - .format(sample, node, device)) - - self.samples.append(sample) - - if len(self.samples) == self.window: - s = sum(self.samples) - self.samples.pop(0) - avg = s / self.window - self.log.debug("Calculated average: {}".format(avg)) - event = AveragedSpectrumScanSampleEvent(avg) - self.send_event(event) - - def add_two(self, value): - value1 = value + 2 - value2 = value * 2 - return [value1, value2] diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/readme.txt b/openAI_RRM/SimulationSlavesConfig/Setting3_112/readme.txt index 5a22888..c1d3666 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/readme.txt +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/readme.txt @@ -1,13 +1,13 @@ -# 1. Run control program and all modules on local node -uniflex-agent --config ./config_local.yaml - +# Start environment if Uniflex is installed in some source ~/Uniflex/dev/bin/activate # 2a. Run control program in master node: uniflex-broker # 2b. Run control program in master node: -python3 rrm_agent.py --config ./config_master.yaml +python3 rrm_agent.py --config ./config_master_simulation.yaml + # 2c. 
Run modules in slave node: +#Simulation uniflex-agent --config ./config_slave.yaml uniflex-agent --config ./config_slave2.yaml uniflex-agent --config ./config_slave3.yaml diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml index b7a42ce..d4107ed 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave.yaml @@ -2,22 +2,13 @@ config: name: 'HC node' - info: 'filter runs on local node' + info: 'rrm simulation' iface: 'lo' iface: 'lo' sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -#broker: -# xsub: "tcp://127.0.0.1:8990" -# xpub: "tcp://127.0.0.1:8989" - control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - discovery: module : uniflex_app_discovery_pyre class_name : PyreDiscoverySlaveModule diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml index 8158de8..eba4afa 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave2.yaml @@ -2,22 +2,13 @@ config: name: 'HC node' - info: 'filter runs on local node' + info: 'rrm simulation' iface: 'lo' iface: 'lo' sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -#broker: -# xsub: "tcp://127.0.0.1:8990" -# xpub: "tcp://127.0.0.1:8989" - control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - discovery: module : uniflex_app_discovery_pyre class_name : PyreDiscoverySlaveModule diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml index 1c03034..39afc9f 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml +++ 
b/openAI_RRM/SimulationSlavesConfig/Setting3_222/config_slave3.yaml @@ -2,22 +2,13 @@ config: name: 'HC node' - info: 'filter runs on local node' + info: 'rrm simulation' iface: 'lo' iface: 'lo' sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -#broker: -# xsub: "tcp://127.0.0.1:8990" -# xpub: "tcp://127.0.0.1:8989" - control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - discovery: module : uniflex_app_discovery_pyre class_name : PyreDiscoverySlaveModule diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/my_filter.py b/openAI_RRM/SimulationSlavesConfig/Setting3_222/my_filter.py deleted file mode 100755 index 59d59a2..0000000 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/my_filter.py +++ /dev/null @@ -1,53 +0,0 @@ -import logging -from uniflex.core import modules -from sbi.radio_device.events import SpectralScanSampleEvent -from common import AveragedSpectrumScanSampleEvent -from common import ChangeWindowSizeEvent - -__author__ = "Piotr Gawlowicz" -__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" -__version__ = "0.1.0" -__email__ = "{gawlowicz}@tkn.tu-berlin.de" - - -class MyAvgFilter(modules.ControlApplication): - def __init__(self, window): - super(MyAvgFilter, self).__init__() - self.log = logging.getLogger('MyFilter') - self.window = window - self.samples = [] - - @modules.on_event(ChangeWindowSizeEvent) - def change_window_size(self, event): - self.log.info("New window size: {}".format(event.window)) - self.window = event.window - - def change_window_size_func(self, newWindow): - self.log.info("New window size: {}".format(newWindow)) - self.window = newWindow - - def get_window_size(self): - return self.window - - @modules.on_event(SpectralScanSampleEvent) - def serve_spectral_scan_sample(self, event): - sample = event.sample - node = event.node - device = event.device - self.log.debug("New SpectralScan Sample:{} from node {}, device: {}" - .format(sample, node, 
device)) - - self.samples.append(sample) - - if len(self.samples) == self.window: - s = sum(self.samples) - self.samples.pop(0) - avg = s / self.window - self.log.debug("Calculated average: {}".format(avg)) - event = AveragedSpectrumScanSampleEvent(avg) - self.send_event(event) - - def add_two(self, value): - value1 = value + 2 - value2 = value * 2 - return [value1, value2] diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/readme.txt b/openAI_RRM/SimulationSlavesConfig/Setting3_222/readme.txt index 5a22888..c1d3666 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/readme.txt +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/readme.txt @@ -1,13 +1,13 @@ -# 1. Run control program and all modules on local node -uniflex-agent --config ./config_local.yaml - +# Start environment if Uniflex is installed in some source ~/Uniflex/dev/bin/activate # 2a. Run control program in master node: uniflex-broker # 2b. Run control program in master node: -python3 rrm_agent.py --config ./config_master.yaml +python3 rrm_agent.py --config ./config_master_simulation.yaml + # 2c. 
Run modules in slave node: +#Simulation uniflex-agent --config ./config_slave.yaml uniflex-agent --config ./config_slave2.yaml uniflex-agent --config ./config_slave3.yaml diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave.yaml index 5635feb..b375052 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave.yaml @@ -2,22 +2,13 @@ config: name: 'HC node' - info: 'filter runs on local node' + info: 'rrm multi scenario simulation' iface: 'lo' iface: 'lo' sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" - control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - discovery: module : uniflex_app_discovery_pyre class_name : PyreDiscoverySlaveModule @@ -31,13 +22,13 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02", "cc:cc:cc:cc:cc:03", "cc:cc:cc:cc:cc:04", "cc:cc:cc:cc:cc:05", "cc:cc:cc:cc:cc:06"], 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:03']], 'myMAC' : 'aa:aa:aa:aa:aa:01', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP1', + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': 'path-to-configfiles/clientconfig/3_112_AP1', 'mode': 'generator', # training or working or generator 'numsClients': [1,1,5,2,4], #for generator 'maxNumClients' : 100, 'scenariosPerAPSetting': 60, 'clientPrefix' : "cc:cc:cc:cc:01:", - 'scenarioBackup' : 
'/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/AP1_scenarios.var' + 'scenarioBackup' : 'path-to-backup-files/AP1_scenarios.var' }} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave2.yaml index 4d21eb9..b94b749 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave2.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave2.yaml @@ -2,22 +2,13 @@ config: name: 'HC node' - info: 'filter runs on local node' + info: 'rrm multi scenario simulation' iface: 'lo' iface: 'lo' sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" - control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - discovery: module : uniflex_app_discovery_pyre class_name : PyreDiscoverySlaveModule @@ -31,13 +22,13 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12", "cc:cc:cc:cc:cc:13", "cc:cc:cc:cc:cc:14", "cc:cc:cc:cc:cc:15", "cc:cc:cc:cc:cc:16"], 'neighbors' : [['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], []], 'myMAC' : 'aa:aa:aa:aa:aa:02', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP2', + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': 'path-to-configfiles/3_112_AP2', 'mode': 'generator', # training or working or generator 'numsClients': [1,3,1,4,3], #for generator 'maxNumClients' : 100, 'clientPrefix' : "cc:cc:cc:cc:02:", 'scenariosPerAPSetting': 60, - 'scenarioBackup' : 
'/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/AP2_scenarios.var' + 'scenarioBackup' : 'path-to-backup-files/AP2_scenarios.var' }} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave3.yaml index 3a59c5a..71c5647 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave3.yaml +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/config_slave3.yaml @@ -2,22 +2,13 @@ config: name: 'HC node' - info: 'filter runs on local node' + info: 'rrm multi scenario simulation' iface: 'lo' iface: 'lo' sub: "tcp://127.0.0.1:8990" pub: "tcp://127.0.0.1:8989" -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" - control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - discovery: module : uniflex_app_discovery_pyre class_name : PyreDiscoverySlaveModule @@ -31,13 +22,13 @@ modules: kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22", "cc:cc:cc:cc:cc:23", "cc:cc:cc:cc:cc:24", "cc:cc:cc:cc:cc:25", "cc:cc:cc:cc:cc:26"], 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:01'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:01']], 'myMAC' : 'aa:aa:aa:aa:aa:03', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP3', + 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, 'clientconf': 'path-to-configfiles/3_112_AP3', 'mode': 'generator', # training or working or generator 'numsClients': [5,2,3,2,2], #for generator 'maxNumClients' : 100, 'scenariosPerAPSetting': 60, 'clientPrefix' : "cc:cc:cc:cc:03:", - 'scenarioBackup' : 
'/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/AP3_scenarios.var' + 'scenarioBackup' : 'path-to-backup-files/AP3_scenarios.var' }} diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/my_filter.py b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/my_filter.py deleted file mode 100755 index 59d59a2..0000000 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/my_filter.py +++ /dev/null @@ -1,53 +0,0 @@ -import logging -from uniflex.core import modules -from sbi.radio_device.events import SpectralScanSampleEvent -from common import AveragedSpectrumScanSampleEvent -from common import ChangeWindowSizeEvent - -__author__ = "Piotr Gawlowicz" -__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" -__version__ = "0.1.0" -__email__ = "{gawlowicz}@tkn.tu-berlin.de" - - -class MyAvgFilter(modules.ControlApplication): - def __init__(self, window): - super(MyAvgFilter, self).__init__() - self.log = logging.getLogger('MyFilter') - self.window = window - self.samples = [] - - @modules.on_event(ChangeWindowSizeEvent) - def change_window_size(self, event): - self.log.info("New window size: {}".format(event.window)) - self.window = event.window - - def change_window_size_func(self, newWindow): - self.log.info("New window size: {}".format(newWindow)) - self.window = newWindow - - def get_window_size(self): - return self.window - - @modules.on_event(SpectralScanSampleEvent) - def serve_spectral_scan_sample(self, event): - sample = event.sample - node = event.node - device = event.device - self.log.debug("New SpectralScan Sample:{} from node {}, device: {}" - .format(sample, node, device)) - - self.samples.append(sample) - - if len(self.samples) == self.window: - s = sum(self.samples) - self.samples.pop(0) - avg = s / self.window - self.log.debug("Calculated average: {}".format(avg)) - event = AveragedSpectrumScanSampleEvent(avg) - self.send_event(event) - - def add_two(self, value): - value1 = 
value + 2 - value2 = value * 2 - return [value1, value2] diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/readme.txt b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/readme.txt index 5a22888..c1d3666 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/readme.txt +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/readme.txt @@ -1,13 +1,13 @@ -# 1. Run control program and all modules on local node -uniflex-agent --config ./config_local.yaml - +# Start environment if Uniflex is installed in some source ~/Uniflex/dev/bin/activate # 2a. Run control program in master node: uniflex-broker # 2b. Run control program in master node: -python3 rrm_agent.py --config ./config_master.yaml +python3 rrm_agent.py --config ./config_master_simulation.yaml + # 2c. Run modules in slave node: +#Simulation uniflex-agent --config ./config_slave.yaml uniflex-agent --config ./config_slave2.yaml uniflex-agent --config ./config_slave3.yaml diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/common.py b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/common.py deleted file mode 100755 index 370d25f..0000000 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/common.py +++ /dev/null @@ -1,28 +0,0 @@ -from uniflex.core import events - -__author__ = "Piotr Gawlowicz" -__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" -__version__ = "0.1.0" -__email__ = "{gawlowicz}@tkn.tu-berlin.de" - - -class AveragedSpectrumScanSampleEvent(events.EventBase): - def __init__(self, avg): - super().__init__() - self.avg = avg - - -class StartMyFilterEvent(events.EventBase): - def __init__(self): - super().__init__() - - -class StopMyFilterEvent(events.EventBase): - def __init__(self): - super().__init__() - - -class ChangeWindowSizeEvent(events.EventBase): - def __init__(self, value): - super().__init__() - self.window = value diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave.yaml 
b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave.yaml deleted file mode 100644 index 03b4660..0000000 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave.yaml +++ /dev/null @@ -1,38 +0,0 @@ -## UniFlex Agent config file - -config: - name: 'HC node' - info: 'filter runs on local node' - iface: 'lo' - iface: 'lo' - sub: "tcp://127.0.0.1:8990" - pub: "tcp://127.0.0.1:8989" - -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" - -control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - - discovery: - module : uniflex_app_discovery_pyre - class_name : PyreDiscoverySlaveModule - kwargs: {"iface":"lo", "groupName":"uniflex_1234"} - -modules: - simple: - module : uniflex_module_simple - class_name : SimpleModule4 - devices : ['phy0'] - kwargs : { "clients" : ["cc:cc:cc:cc:cc:01", "cc:cc:cc:cc:cc:02", "cc:cc:cc:cc:cc:03", "cc:cc:cc:cc:cc:04", "cc:cc:cc:cc:cc:05", "cc:cc:cc:cc:cc:06"], - 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:03']], - 'myMAC' : 'aa:aa:aa:aa:aa:01', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP1', - 'mode': 'training', # training or working - 'numsClients': [1,1,1,1,1] - }} - diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave2.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave2.yaml deleted file mode 100644 index cdc9e32..0000000 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave2.yaml +++ /dev/null @@ -1,38 +0,0 @@ -## UniFlex Agent config file - -config: - name: 'HC node' - info: 'filter runs on local node' - iface: 'lo' - iface: 'lo' - sub: "tcp://127.0.0.1:8990" - 
pub: "tcp://127.0.0.1:8989" - -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" - -control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - - discovery: - module : uniflex_app_discovery_pyre - class_name : PyreDiscoverySlaveModule - kwargs: {"iface":"lo", "groupName":"uniflex_1234"} - -modules: - simple: - module : uniflex_module_simple - class_name : SimpleModule4 - devices : ['phy0'] - kwargs : { "clients" : ["cc:cc:cc:cc:cc:11", "cc:cc:cc:cc:cc:12", "cc:cc:cc:cc:cc:13", "cc:cc:cc:cc:cc:14", "cc:cc:cc:cc:cc:15", "cc:cc:cc:cc:cc:16"], - 'neighbors' : [['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], ['aa:aa:aa:aa:aa:01', 'aa:aa:aa:aa:aa:03'], []], - 'myMAC' : 'aa:aa:aa:aa:aa:02', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':1, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP2', - 'mode': 'training', # training or working - 'numsClients': [1,1,1,1,1] - }} - diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave3.yaml b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave3.yaml deleted file mode 100644 index a6b4147..0000000 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/config_slave3.yaml +++ /dev/null @@ -1,38 +0,0 @@ -## UniFlex Agent config file - -config: - name: 'HC node' - info: 'filter runs on local node' - iface: 'lo' - iface: 'lo' - sub: "tcp://127.0.0.1:8990" - pub: "tcp://127.0.0.1:8989" - -broker: - xsub: "tcp://127.0.0.1:8990" - xpub: "tcp://127.0.0.1:8989" - -control_applications: - myFilter: - file : my_filter.py - class_name : MyAvgFilter - kwargs : {"window": 5} - - discovery: - module : uniflex_app_discovery_pyre - class_name : PyreDiscoverySlaveModule - kwargs: {"iface":"lo", "groupName":"uniflex_1234"} - 
-modules: - simple: - module : uniflex_module_simple - class_name : SimpleModule4 - devices : ['phy0'] - kwargs : { "clients" : ["cc:cc:cc:cc:cc:21", "cc:cc:cc:cc:cc:22", "cc:cc:cc:cc:cc:23", "cc:cc:cc:cc:cc:24", "cc:cc:cc:cc:cc:25", "cc:cc:cc:cc:cc:26"], - 'neighbors' : [['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:01'], ['aa:aa:aa:aa:aa:02', 'aa:aa:aa:aa:aa:01'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:02'], ['aa:aa:aa:aa:aa:01']], - 'myMAC' : 'aa:aa:aa:aa:aa:03', - 'simulation': {'channelSwitchingTime': 0, 'channelThroughputDefault':54000000, 'txBytesRandom':0, 'clientnum':2, 'clientconf': '/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/clientconfig/3_112_AP3', - 'mode': 'training', # training or working - 'numsClients': [2,2,1,1,1] - }} - diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/my_filter.py b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/my_filter.py deleted file mode 100755 index 59d59a2..0000000 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/my_filter.py +++ /dev/null @@ -1,53 +0,0 @@ -import logging -from uniflex.core import modules -from sbi.radio_device.events import SpectralScanSampleEvent -from common import AveragedSpectrumScanSampleEvent -from common import ChangeWindowSizeEvent - -__author__ = "Piotr Gawlowicz" -__copyright__ = "Copyright (c) 2016, Technische Universität Berlin" -__version__ = "0.1.0" -__email__ = "{gawlowicz}@tkn.tu-berlin.de" - - -class MyAvgFilter(modules.ControlApplication): - def __init__(self, window): - super(MyAvgFilter, self).__init__() - self.log = logging.getLogger('MyFilter') - self.window = window - self.samples = [] - - @modules.on_event(ChangeWindowSizeEvent) - def change_window_size(self, event): - self.log.info("New window size: {}".format(event.window)) - self.window = event.window - - def change_window_size_func(self, newWindow): - self.log.info("New window size: {}".format(newWindow)) - self.window = newWindow - - def get_window_size(self): - 
return self.window - - @modules.on_event(SpectralScanSampleEvent) - def serve_spectral_scan_sample(self, event): - sample = event.sample - node = event.node - device = event.device - self.log.debug("New SpectralScan Sample:{} from node {}, device: {}" - .format(sample, node, device)) - - self.samples.append(sample) - - if len(self.samples) == self.window: - s = sum(self.samples) - self.samples.pop(0) - avg = s / self.window - self.log.debug("Calculated average: {}".format(avg)) - event = AveragedSpectrumScanSampleEvent(avg) - self.send_event(event) - - def add_two(self, value): - value1 = value + 2 - value2 = value * 2 - return [value1, value2] diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/readme.txt b/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/readme.txt deleted file mode 100644 index 5a22888..0000000 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_varSetsmall/readme.txt +++ /dev/null @@ -1,15 +0,0 @@ -# 1. Run control program and all modules on local node -uniflex-agent --config ./config_local.yaml - -source ~/Uniflex/dev/bin/activate - -# 2a. Run control program in master node: -uniflex-broker -# 2b. Run control program in master node: -python3 rrm_agent.py --config ./config_master.yaml -# 2c. Run modules in slave node: -uniflex-agent --config ./config_slave.yaml -uniflex-agent --config ./config_slave2.yaml -uniflex-agent --config ./config_slave3.yaml - -# For debugging mode run with -v option diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/readme.txt b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/readme.txt index 5a22888..c0734b5 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/readme.txt +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/readme.txt @@ -1,15 +1,20 @@ -# 1. Run control program and all modules on local node -uniflex-agent --config ./config_local.yaml - +# Start environment if Uniflex is installed in some source ~/Uniflex/dev/bin/activate # 2a. 
Run control program in master node: uniflex-broker # 2b. Run control program in master node: -python3 rrm_agent.py --config ./config_master.yaml +python3 rrm_agent.py --config ./config_master_simulation.yaml + # 2c. Run modules in slave node: +#Simulation uniflex-agent --config ./config_slave.yaml uniflex-agent --config ./config_slave2.yaml uniflex-agent --config ./config_slave3.yaml +uniflex-agent --config ./config_slave4.yaml +uniflex-agent --config ./config_slave5.yaml +uniflex-agent --config ./config_slave6.yaml +uniflex-agent --config ./config_slave7.yaml +uniflex-agent --config ./config_slave8.yaml # For debugging mode run with -v option diff --git a/openAI_RRM/config_slave.yaml b/openAI_RRM/config_slave.yaml index f005370..0488bb6 100644 --- a/openAI_RRM/config_slave.yaml +++ b/openAI_RRM/config_slave.yaml @@ -8,9 +8,9 @@ config: sub: "tcp://192.168.10.157:8990" pub: "tcp://192.168.10.157:8989" -broker: - xpub: "tcp://192.168.10.157:8990" - xsub: "tcp://192.168.10.157:8989" +#broker: +# xpub: "tcp://192.168.10.157:8990" +# xsub: "tcp://192.168.10.157:8989" control_applications: myFilter: diff --git a/openAI_RRM/readme.txt b/openAI_RRM/readme.txt index ce749c4..61b5e23 100644 --- a/openAI_RRM/readme.txt +++ b/openAI_RRM/readme.txt @@ -6,12 +6,13 @@ uniflex-broker # 2b. Run control program in master node: python3 rrm_agent.py --config ./config_master.yaml python3 rrm_agent.py --config ./config_master_simulation.yaml + # 2c. 
Run modules in slave node: #Linux WiFi AP uniflex-agent --config ./config_slave.yaml #Simulation -uniflex-agent --config ./SimulationSlavesConfig/Setting2_2/config_slave.yaml -uniflex-agent --config ./SimulationSlavesConfig/Setting2_2/config_slave2.yaml -uniflex-agent --config ./SimulationSlavesConfig/Setting2_2/config_slave3.yaml +uniflex-agent --config ./SimulationSlavesConfig/##Name of Experiment##/config_slave.yaml +uniflex-agent --config ./SimulationSlavesConfig/##Name of Experiment##/config_slave2.yaml +# and so on # For debugging mode run with -v option diff --git a/openAI_RRM/rrm_agent.py b/openAI_RRM/rl_agent.py similarity index 100% rename from openAI_RRM/rrm_agent.py rename to openAI_RRM/rl_agent.py diff --git a/openAI_RRM/rrm_agent_multi.py b/openAI_RRM/rl_agent_multi.py similarity index 98% rename from openAI_RRM/rrm_agent_multi.py rename to openAI_RRM/rl_agent_multi.py index 568ddf8..bc1b4e9 100644 --- a/openAI_RRM/rrm_agent_multi.py +++ b/openAI_RRM/rl_agent_multi.py @@ -24,7 +24,7 @@ REWARD_INIT = 0.00001 SORT_VALUES = False SCENARIOS = 1 -EPSILON_MAX_DECAY = 0.95 +EPSILON_MAX_DECAY = 0.999999999999 #0.95 EPSILON_MIN = 0.01 ACTIVATE_OBSERVER = False @@ -104,7 +104,7 @@ def normalize_reward(reward, rewardpow, action): #set reward to 1.0 if it is first value if maxreward[currentScenario] == REWARD_INIT: - reward = 1.0 + reward = 1.0 #/ (2**rewardpow) reward = pow(reward, rewardpow) @@ -252,7 +252,7 @@ def normalize_reward(reward, rewardpow, action): while episode < int(args.startepisode): epsilon_max *= EPSILON_MAX_DECAY - epsilon_max = max(epsilon_max, EPSILON_MIN)#max(pow(epsilon_max, 3), EPSILON_MIN) + epsilon_max = max(pow(epsilon_max, 3), EPSILON_MIN) # max(epsilon_max, EPSILON_MIN)# episode += 1 # Schleife für Episoden @@ -266,7 +266,7 @@ def normalize_reward(reward, rewardpow, action): epsilon = epsilon_max epsilon_max *= EPSILON_MAX_DECAY - epsilon_max = max(epsilon_max, EPSILON_MIN)#max(pow(epsilon_max, 3), EPSILON_MIN) + epsilon_max = 
max(pow(epsilon_max, 3), EPSILON_MIN) #max(epsilon_max, EPSILON_MIN) done = False aps = int(log(a_size, numChannels)) diff --git a/openAI_RRM/rrm_agent_evalmodel6_multi.py b/openAI_RRM/rl_agent_multi_evalnetwork.py similarity index 71% rename from openAI_RRM/rrm_agent_evalmodel6_multi.py rename to openAI_RRM/rl_agent_multi_evalnetwork.py index b874fbf..d5cfe68 100644 --- a/openAI_RRM/rrm_agent_evalmodel6_multi.py +++ b/openAI_RRM/rl_agent_multi_evalnetwork.py @@ -1,6 +1,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- +##TODO: Insert path to neuronal network + import gym import UniFlexGym import tensorflow as tf @@ -270,8 +272,7 @@ def eval_handover(client, new_clients): ac_space = spaces.MultiDiscrete([2,2,2]) ob_space = spaces.Box(low=0, high=6, shape=(ac_space.nvec.shape[0],2), dtype=np.uint32) -#trainingfileap = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/controller_3_112neuronalesNetz.train" -trainingfile = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/controller_3_varSetneuronalesNetz.train" +trainingfile = "path-to-neuronal-network.train" #generate random test data [test_data, test_labels] = generate_testdata(N_test, aps, maxclients, topologies, SORT_VALUES) @@ -280,55 +281,12 @@ def eval_handover(client, new_clients): for elem, label in zip(test_data, test_labels): clients.append({'clients': elem[:,0], 'aps': elem[:,1], 'valid':label}) -#clients = [ {'clients': [1, 1, 2], 'aps': [2,2,2], 'valid':[[1,1,0], [0,0,1]]}, -# {'clients': [1, 1, 1], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, -# {'clients': [1, 1, 5], 'aps': [2,2,2], 'valid':[[1,1,0], [0,0,1]]}, -# {'clients': [1, 3, 2], 'aps': [2,2,2], 'valid':[[0,1,0], [1,0,1]]}, -# {'clients': [5, 3, 4], 'aps': [2,2,2], 'valid':[[1,0,0], [0,1,1]]}, -# {'clients': [5, 1, 3], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, -# {'clients': [2, 4, 2], 'aps': [1,2,1], 'valid':[[0,1,0], 
[1,0,1]]}, -# {'clients': [7, 1, 5], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, -# {'clients': [4, 3, 2], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]}, -# {'clients': [1, 1, 1], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]}, -# {'clients': [1, 3, 2], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]} -# ] -#clients2 = [{'clients': [1, 1, 2], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [1, 2, 3], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [6, 0, 0], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [1, 5, 1], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [5, 1, 1], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [2, 2, 2], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [5, 5, 1], 'valid':[[1,0,1], [0,1,0]]} -# ] -handover = [{'clients': [1, 5, 1], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [2, 4, 1], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [1, 4, 2], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]} - ] - -handover2 = [{'clients': [1, 5, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [2, 4, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [1, 4, 2], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]} - ] - -#handover2 = [{'clients': [1, 2, 1], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [2, 1, 1], 'valid':[[0,1,1], [1,0,0]]}, -# {'clients': [1, 1, 2], 'valid':[[1,1,0], [0,0,1]]} -# ] -handover3 = [{'clients': [2, 2, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0], [1,0,0], [0,1,1]]}, - {'clients': [2, 1, 2], 'aps': [1,2,1], 'valid':[[0,1,1], [1,0,0], [1,1,0], [0,0,1]]}, - {'clients': [1, 2, 2], 'aps': [1,2,1], 'valid':[[1,1,0], [0,0,1], [1,0,1], [0,1,0]]} +special = [{'clients': [10, 10, 1000], 'aps': [2,2,2], 'valid':[[1,1,0], [0,0,1]]}, + {'clients': [1000, 10, 10], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]}, + {'clients': [1000, 4, 2], 'aps': [1,0,1], 'valid':[[1,0,0], [0,0,1],[1,1,0], [0,1,1]]} ] -#aps = [[2,2,2]] -#aps2 = [[1,2,1]] - -#states = [[[1,2],[1,2],[2,2]], 
[[1,2],[2,2],[3,2]], [[6,2],[0,2],[0,2]], [[1,2],[5,2],[1,2]], [[2,2],[2,2],[2,2]], -# [[1,1],[1,0],[2,1]], [[1,1],[2,1],[3,0]], [[6,1],[0,0],[0,1]], [[1,1],[5,1],[1,0]], [[2,0],[2,1],[2,1]], -# [[2,1],[2,2],[2,1]], [[3,1],[2,2],[2,1]]] -#states = [[[2,1],[2,1],[2,2]], [[2,1],[2,2],[2,3]], [[2,6],[2,0],[2,0]], [[2,1],[2,5],[2,1]], [[2,2],[2,2],[2,2]], -# [[0,1],[1,1],[1,2]], [[1,1],[0,2],[1,3]], [[1,6],[1,0],[0,0]], [[1,1],[1,5],[0,1]], [[1,2],[0,2],[1,2]], -# [[1,2],[2,2],[1,2]], [[1,3],[2,2],[1,2]]] print("Observation space: ", ob_space, ob_space.dtype) print("Action space: ", ac_space, ac_space.nvec) @@ -350,26 +308,9 @@ def eval_handover(client, new_clients): metrics=['accuracy']) model.load_weights(trainingfile) -#modelap = keras.Sequential() -#modelap.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) -#modelap.add(keras.layers.Dense(5, activation='relu')) -#modelap.add(keras.layers.Dense(a_size, activation='softmax')) -#modelap.compile(optimizer=tf.train.AdamOptimizer(0.001), -# loss='categorical_crossentropy', -# metrics=['accuracy']) -#modelap.load_weights(trainingfileap) - -print("\nSame domain:") +print("\n3000 zufällige Szenarien:") eval(clients) -#print("\nMan in the middle:") -#eval(clients2, aps2) - -print("\nHandover test") -eval_handover(handover[0], handover[1:]) - -print("\nHandover test 2") -eval_handover(handover2[0], handover2[1:]) +print("\Spezielle Szenarien:") +eval(special) -print("\nHandover test 3") -eval_handover(handover3[0], handover3[1:]) diff --git a/openAI_RRM/rrm_agent_evalmodel1_cliap-apcli.py b/openAI_RRM/rrm_agent_evalmodel1_cliap-apcli.py deleted file mode 100644 index 4402740..0000000 --- a/openAI_RRM/rrm_agent_evalmodel1_cliap-apcli.py +++ /dev/null @@ -1,264 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import gym -import UniFlexGym -import tensorflow as tf -import tensorflow.contrib.slim as slim -import numpy as np -from tensorflow import keras -import argparse -import logging 
-import time -import csv -import os -from math import * -from scipy.optimize import fsolve -from gym import spaces - -sortedIndecies = [] -ac_space = [] -BANDWITH_ON_CHANNEL = 54e6 -numChannels = 2 - -def normalize_state(state, ob_space, s_size): - global sortedIndecies - state = np.array(state) - - #sort states - index = np.arange(state.shape[0]) - index = index.reshape((-1,1)) - state = np.concatenate((state, index), axis=1) - # - state = np.sort(state.view('i8,i8,i8'), order=['f0', 'f1'], axis=0).view(np.int) - sortedIndecies = state[:,-1] - state = np.delete(state, -1, axis=1) - state = np.reshape(state, [1, s_size]) - obspacehigh = np.reshape(ob_space.high, [1, s_size]) - #state = state *2 / obspacehigh - 1 - state = state -1 - - return state - -def map_action(mappedAction): - action = np.zeros(len(ac_space.nvec)) - for index in range(len(ac_space.nvec)): - # filter action by the index - ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) - ifaceaction = ifaceaction % ac_space.nvec[0] - #print("ifaceaction at " + str(index) + " is " + str(ifaceaction)) - #print("Find " + str(index) + "in sorted indecies" + str(sortedIndecies)+ "at" + str(np.where(sortedIndecies == index))) - #action[np.where(sortedIndecies == index)[0]] = ifaceaction - action[sortedIndecies[index]] = ifaceaction - return action - -def eval(clients, aps): - errorcounter_cli = 0 - errorcounter_ap = 0 - counter = 0 - - for client in clients: - for ap in aps: - state_cli = np.array([client['clients'], ap]) - state_ap = np.array([ap, client['clients']]) - - state_cli = state_cli.transpose() - state_ap = state_ap.transpose() - - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - - state_ap_norm = normalize_state(state_ap.tolist(), ob_space, s_size) - actionap = np.argmax(modelap.predict(state_ap_norm)[0]) - actionvectorap = map_action(actionap) - - #success_cli = actionvector 
in client['valid'] - #success_ap = actionvectorap in client['valid'] - - success_cli = False - for tmp in client['valid']: - tmpval = True - for a, b in zip(actionvector, tmp): - if a != b: - tmpval = False - break - if tmpval: - success_cli = True - break - - success_ap = False - for tmp in client['valid']: - tmpval = True - for a, b in zip(actionvectorap, tmp): - if a != b: - tmpval = False - break - if tmpval: - success_ap = True - break - - print("[Cli, Ap]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli)) - print("[Ap, Cli]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(actionap) + ", Actionvector" + str(actionvectorap) + ", " + str(success_ap)) - counter += 1 - - if not success_ap: - errorcounter_ap +=1 - - if not success_cli: - errorcounter_cli +=1 - - print("Errors in [Cli,Ap]:" + str(errorcounter_cli) + "/" + str(counter) + "(" + str(errorcounter_cli/counter) + "%)") - print("Errors in [Ap,Cli]:" + str(errorcounter_ap) + "/" + str(counter) + "(" + str(errorcounter_ap/counter) + "%)") - -def calculate_reward(clients_p_ap, action): - reward = 0 - - for ap in range(len(action)): - channel = action[ap] - - #search num aps on same channel - same_chan = 0 - for act in action: - if act == channel: - same_chan += 1 - - ap_bandwidth = BANDWITH_ON_CHANNEL/ same_chan - reward += clients_p_ap[ap] * sqrt(ap_bandwidth/clients_p_ap[ap]) - return reward - -def eval_handover(aps, client, new_clients): - print("Current state:") - for ap in aps: - state_cli = np.array([client['clients'], ap]) - - state_cli = state_cli.transpose() - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - - success_cli = False - for tmp in client['valid']: - tmpval = True - for a, b in zip(actionvector, tmp): - if a != b: - tmpval = False - break - if tmpval: - 
success_cli = True - break - - reward = calculate_reward(client['clients'], ap) - print("[Cli, Ap]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) - - print("Handover simulation") - for new_client in new_clients: - for ap in aps: - state_cli = np.array([new_client['clients'], ap]) - - state_cli = state_cli.transpose() - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - reward = calculate_reward(new_client['clients'], actionvector) - - success_cli = False - for tmp in new_client['valid']: - tmpval = True - for a, b in zip(actionvector, tmp): - if a != b: - tmpval = False - break - if tmpval: - success_cli = True - break - - print("[Cli, Ap]: Cli:" + str(new_client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) - -ac_space = spaces.MultiDiscrete([2,2,2]) -ob_space = spaces.Box(low=0, high=6, shape=(ac_space.nvec.shape[0],2), dtype=np.uint32) -trainingfileap = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test_ap-cli/logs/controller_3_112neuronalesNetz.train" -trainingfile = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/controller_3_112neuronalesNetz.train" - -clients = [{'clients': [1, 1, 2], 'valid':[[1,1,0], [0,0,1]]}, - {'clients': [1, 2, 3], 'valid':[[1,1,0], [0,0,1]]}, - {'clients': [6, 0, 0], 'valid':[[1,0,0], [0,1,1]]}, - {'clients': [1, 5, 1], 'valid':[[0,1,0], [1,0,1]]}, - {'clients': [2, 2, 2], 'valid':[[0,1,1], [1,0,1], [1,1,0], [0,1,0], [0,0,1], [1,0,0]]}, - {'clients': [5, 5, 1], 'valid':[[0,1,1], [1,0,1], [0,1,0], [1,0,0]]} - ] -clients2 = [{'clients': [1, 1, 2], 'valid':[[1,0,1], [0,1,0]]}, - 
{'clients': [1, 2, 3], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [6, 0, 0], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [1, 5, 1], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [2, 2, 2], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [5, 5, 1], 'valid':[[1,0,1], [0,1,0]]} - ] -handover = [{'clients': [1, 5, 1], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [2, 4, 1], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [1, 4, 2], 'valid':[[1,0,1], [0,1,0]]} - ] - -handover2 = [{'clients': [1, 2, 1], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [2, 1, 1], 'valid':[[0,1,1], [1,0,0]]}, - {'clients': [1, 1, 2], 'valid':[[1,1,0], [0,0,1]]} - ] - -handover3 = [{'clients': [2, 2, 1], 'valid':[[1,0,1], [0,1,0], [1,0,0], [0,1,1]]}, - {'clients': [2, 1, 2], 'valid':[[0,1,1], [1,0,0], [1,1,0], [0,0,1]]}, - {'clients': [1, 2, 2], 'valid':[[1,1,0], [0,0,1], [1,0,1], [0,1,0]]} - ] - -aps = [[2,2,2], [1,1,0], [1,0,1], [0,1,1], [0,0,0]] -aps2 = [[1,2,1], [0,0,1]] - -#states = [[[1,2],[1,2],[2,2]], [[1,2],[2,2],[3,2]], [[6,2],[0,2],[0,2]], [[1,2],[5,2],[1,2]], [[2,2],[2,2],[2,2]], -# [[1,1],[1,0],[2,1]], [[1,1],[2,1],[3,0]], [[6,1],[0,0],[0,1]], [[1,1],[5,1],[1,0]], [[2,0],[2,1],[2,1]], -# [[2,1],[2,2],[2,1]], [[3,1],[2,2],[2,1]]] -#states = [[[2,1],[2,1],[2,2]], [[2,1],[2,2],[2,3]], [[2,6],[2,0],[2,0]], [[2,1],[2,5],[2,1]], [[2,2],[2,2],[2,2]], -# [[0,1],[1,1],[1,2]], [[1,1],[0,2],[1,3]], [[1,6],[1,0],[0,0]], [[1,1],[1,5],[0,1]], [[1,2],[0,2],[1,2]], -# [[1,2],[2,2],[1,2]], [[1,3],[2,2],[1,2]]] - -print("Observation space: ", ob_space, ob_space.dtype) -print("Action space: ", ac_space, ac_space.nvec) - -tmps_size = ob_space.shape -s_size = tmps_size[0] * tmps_size[1] -#s_size = list(map(lambda x: x * ob_space.high, s_size)) -a_size = pow(ac_space.nvec[0], ac_space.nvec.shape[0]) - -print("observation_space size:" + str(s_size)) -print("Data: Trained Data of setting 222 with sorting agent. Observation has actual neighbours. 
Experiment 1") - -model = keras.Sequential() -model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) -model.add(keras.layers.Dense(5, activation='relu')) -model.add(keras.layers.Dense(a_size, activation='softmax')) -model.compile(optimizer=tf.train.AdamOptimizer(0.001), - loss='categorical_crossentropy', - metrics=['accuracy']) -model.load_weights(trainingfile) - -modelap = keras.Sequential() -modelap.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) -modelap.add(keras.layers.Dense(5, activation='relu')) -modelap.add(keras.layers.Dense(a_size, activation='softmax')) -modelap.compile(optimizer=tf.train.AdamOptimizer(0.001), - loss='categorical_crossentropy', - metrics=['accuracy']) -modelap.load_weights(trainingfileap) - -print("\nSame domain:") -eval(clients, aps) - -print("\nMan in the middle:") -eval(clients2, aps2) - -print("\nHandover test") -eval_handover(aps, handover[0], handover[1:]) - -print("\nHandover test 2") -eval_handover(aps, handover2[0], handover2[1:]) - -print("\nHandover test 3") -eval_handover(aps, handover3[0], handover3[1:]) diff --git a/openAI_RRM/rrm_agent_evalmodel2_sameset112.py b/openAI_RRM/rrm_agent_evalmodel2_sameset112.py deleted file mode 100644 index f066ef3..0000000 --- a/openAI_RRM/rrm_agent_evalmodel2_sameset112.py +++ /dev/null @@ -1,279 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import gym -import UniFlexGym -import tensorflow as tf -import tensorflow.contrib.slim as slim -import numpy as np -from tensorflow import keras -import argparse -import logging -import time -import csv -import os -from math import * -from scipy.optimize import fsolve -from gym import spaces - -sortedIndecies = [] -ac_space = [] -BANDWITH_ON_CHANNEL = 54e6 -numChannels = 2 -SORT_VALUES = True - -def normalize_state(state, ob_space, s_size): - global sortedIndecies - state = np.array(state) - - #sort states - index = np.arange(state.shape[0]) - index = index.reshape((-1,1)) - state = 
np.concatenate((state, index), axis=1) - # - state = np.sort(state.view('i8,i8,i8'), order=['f0', 'f1'], axis=0).view(np.int) - sortedIndecies = state[:,-1] - state = np.delete(state, -1, axis=1) - state = np.reshape(state, [1, s_size]) - obspacehigh = np.reshape(ob_space.high, [1, s_size]) - #state = state *2 / obspacehigh - 1 - state = state -1 - - return state - -def map_action(mappedAction): - action = np.zeros(len(ac_space.nvec)) - for index in range(len(ac_space.nvec)): - # filter action by the index - ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) - ifaceaction = ifaceaction % ac_space.nvec[0] - #print("ifaceaction at " + str(index) + " is " + str(ifaceaction)) - #print("Find " + str(index) + "in sorted indecies" + str(sortedIndecies)+ "at" + str(np.where(sortedIndecies == index))) - #action[np.where(sortedIndecies == index)[0]] = ifaceaction - action[sortedIndecies[index]] = ifaceaction - return action - -def eval(clients, aps): - errorcounter_cli = 0 - errorcounter_ap = 0 - counter = 0 - - for client in clients: - for ap in aps: - state_cli = np.array([client['clients'], ap]) - #state_ap = np.array([ap, client['clients']]) - - state_cli = state_cli.transpose() - #state_ap = state_ap.transpose() - - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - - #state_ap_norm = normalize_state(state_ap.tolist(), ob_space, s_size) - #actionap = np.argmax(modelap.predict(state_ap_norm)[0]) - #actionvectorap = map_action(actionap) - - success_cli = False - for tmp in client['valid']: - tmpval = True - for a, b in zip(actionvector, tmp): - if a != b: - tmpval = False - break - if tmpval: - success_cli = True - break - - #success_ap = False - #for tmp in client['valid']: - # tmpval = True - # for a, b in zip(actionvectorap, tmp): - # if a != b: - # tmpval = False - # break - # if tmpval: - # success_ap = True - # break - - print("[Cli, Ap]: 
Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli)) - #print("[Ap, Cli]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(actionap) + ", Actionvector" + str(actionvectorap) + ", " + str(success_ap)) - counter += 1 - - #if not success_ap: - # errorcounter_ap +=1 - - if not success_cli: - errorcounter_cli +=1 - - print("Errors in [Cli,Ap]:" + str(errorcounter_cli) + "/" + str(counter) + "(" + str(errorcounter_cli/counter) + "%)") - #print("Errors in [Ap,Cli]:" + str(errorcounter_ap) + "/" + str(counter) + "(" + str(errorcounter_ap/counter) + "%)") - -def calculate_reward(clients_p_ap, action): - reward = 0 - - for ap in range(len(action)): - channel = action[ap] - - #search num aps on same channel - same_chan = 0 - for act in action: - if act == channel: - same_chan += 1 - - ap_bandwidth = BANDWITH_ON_CHANNEL/ same_chan - reward += clients_p_ap[ap] * sqrt(ap_bandwidth/clients_p_ap[ap]) - return reward - -def get_best_reward(client, ap): - state_cli = np.array([client, ap]) - #state_ap = np.array([ap, client['clients']]) - - state_cli = state_cli.transpose() - #state_ap = state_ap.transpose() - - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - - reward = calculate_reward(client, actionvector) - return reward - -def eval_handover(aps, client, new_clients): - print("Current state:") - for ap in aps: - state_cli = np.array([client['clients'], ap]) - - state_cli = state_cli.transpose() - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - - success_cli = False - for tmp in client['valid']: - tmpval = True - for a, b in zip(actionvector, tmp): - if a != b: - tmpval = False - break - if tmpval: - success_cli = True - break - - reward = 
get_best_reward(client['clients'], ap) - - print("[Cli, Ap]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) - - print("Handover simulation") - for new_client in new_clients: - for ap in aps: - state_cli = np.array([new_client['clients'], ap]) - - state_cli = state_cli.transpose() - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - reward = calculate_reward(new_client['clients'], actionvector) - - success_cli = False - for tmp in new_client['valid']: - tmpval = True - for a, b in zip(actionvector, tmp): - if a != b: - tmpval = False - break - if tmpval: - success_cli = True - break - - print("[Cli, Ap]: Cli:" + str(new_client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) - -ac_space = spaces.MultiDiscrete([2,2,2]) -ob_space = spaces.Box(low=0, high=6, shape=(ac_space.nvec.shape[0],2), dtype=np.uint32) -#trainingfileap = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/controller_3_112neuronalesNetz.train" -trainingfile = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/training3_sort_cli-stap_1set_112/logs/controller_3_112neuronalesNetz.train" - -clients = [{'clients': [1, 1, 2], 'valid':[[1,1,0], [0,0,1]]}, - {'clients': [1, 2, 3], 'valid':[[1,1,0], [0,0,1]]}, - {'clients': [6, 0, 0], 'valid':[[1,0,0], [0,1,1]]}, - {'clients': [1, 5, 1], 'valid':[[0,1,0], [1,0,1]]}, - {'clients': [2, 2, 2], 'valid':[[0,1,1], [1,0,1], [1,1,0], [0,1,0], [0,0,1], [1,0,0]]}, - {'clients': [5, 5, 1], 'valid':[[0,1,1], [1,0,1], [0,1,0], [1,0,0]]} - ] -clients2 = [{'clients': [1, 1, 2], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [1, 2, 
3], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [6, 0, 0], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [1, 5, 1], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [5, 1, 1], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [2, 2, 2], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [5, 5, 1], 'valid':[[1,0,1], [0,1,0]]} - ] -handover = [{'clients': [1, 5, 1], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [2, 4, 1], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [1, 4, 2], 'valid':[[1,0,1], [0,1,0]]} - ] - -handover2 = [{'clients': [1, 2, 1], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [2, 1, 1], 'valid':[[0,1,1], [1,0,0]]}, - {'clients': [1, 1, 2], 'valid':[[1,1,0], [0,0,1]]} - ] - -handover3 = [{'clients': [2, 2, 1], 'valid':[[1,0,1], [0,1,0], [1,0,0], [0,1,1]]}, - {'clients': [2, 1, 2], 'valid':[[0,1,1], [1,0,0], [1,1,0], [0,0,1]]}, - {'clients': [1, 2, 2], 'valid':[[1,1,0], [0,0,1], [1,0,1], [0,1,0]]} - ] - -aps = [[2,2,2]] -aps2 = [[1,2,1]] - -#states = [[[1,2],[1,2],[2,2]], [[1,2],[2,2],[3,2]], [[6,2],[0,2],[0,2]], [[1,2],[5,2],[1,2]], [[2,2],[2,2],[2,2]], -# [[1,1],[1,0],[2,1]], [[1,1],[2,1],[3,0]], [[6,1],[0,0],[0,1]], [[1,1],[5,1],[1,0]], [[2,0],[2,1],[2,1]], -# [[2,1],[2,2],[2,1]], [[3,1],[2,2],[2,1]]] -#states = [[[2,1],[2,1],[2,2]], [[2,1],[2,2],[2,3]], [[2,6],[2,0],[2,0]], [[2,1],[2,5],[2,1]], [[2,2],[2,2],[2,2]], -# [[0,1],[1,1],[1,2]], [[1,1],[0,2],[1,3]], [[1,6],[1,0],[0,0]], [[1,1],[1,5],[0,1]], [[1,2],[0,2],[1,2]], -# [[1,2],[2,2],[1,2]], [[1,3],[2,2],[1,2]]] - -print("Observation space: ", ob_space, ob_space.dtype) -print("Action space: ", ac_space, ac_space.nvec) - -tmps_size = ob_space.shape -s_size = tmps_size[0] * tmps_size[1] -#s_size = list(map(lambda x: x * ob_space.high, s_size)) -a_size = pow(ac_space.nvec[0], ac_space.nvec.shape[0]) - -print("observation_space size:" + str(s_size)) - -print("Data: Trained Data of Setting 112. 
Sorted by cli and aps Experiment 2") - -model = keras.Sequential() -model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) -model.add(keras.layers.Dense(5, activation='relu')) -model.add(keras.layers.Dense(a_size, activation='softmax')) -model.compile(optimizer=tf.train.AdamOptimizer(0.001), - loss='categorical_crossentropy', - metrics=['accuracy']) -model.load_weights(trainingfile) - -#modelap = keras.Sequential() -#modelap.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) -#modelap.add(keras.layers.Dense(5, activation='relu')) -#modelap.add(keras.layers.Dense(a_size, activation='softmax')) -#modelap.compile(optimizer=tf.train.AdamOptimizer(0.001), -# loss='categorical_crossentropy', -# metrics=['accuracy']) -#modelap.load_weights(trainingfileap) - -print("\nSame domain:") -eval(clients, aps) - -print("\nMan in the middle:") -eval(clients2, aps2) - -print("\nHandover test") -eval_handover(aps, handover[0], handover[1:]) - -print("\nHandover test 2") -eval_handover(aps, handover2[0], handover2[1:]) - -print("\nHandover test 3") -eval_handover(aps, handover3[0], handover3[1:]) diff --git a/openAI_RRM/rrm_agent_evalmodel3_difset_sort_cli-ap.py b/openAI_RRM/rrm_agent_evalmodel3_difset_sort_cli-ap.py deleted file mode 100644 index c97b091..0000000 --- a/openAI_RRM/rrm_agent_evalmodel3_difset_sort_cli-ap.py +++ /dev/null @@ -1,284 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import gym -import UniFlexGym -import tensorflow as tf -import tensorflow.contrib.slim as slim -import numpy as np -from tensorflow import keras -import argparse -import logging -import time -import csv -import os -from math import * -from scipy.optimize import fsolve -from gym import spaces - -sortedIndecies = [] -ac_space = [] -BANDWITH_ON_CHANNEL = 54e6 -numChannels = 2 - -def normalize_state(state, ob_space, s_size): - global sortedIndecies - state = np.array(state) - - #sort states - index = np.arange(state.shape[0]) - index 
= index.reshape((-1,1)) - state = np.concatenate((state, index), axis=1) - # - state = np.sort(state.view('i8,i8,i8'), order=['f0', 'f1'], axis=0).view(np.int) - sortedIndecies = state[:,-1] - state = np.delete(state, -1, axis=1) - state = np.reshape(state, [1, s_size]) - obspacehigh = np.reshape(ob_space.high, [1, s_size]) - #state = state *2 / obspacehigh - 1 - state = state -1 - - return state - -def map_action(mappedAction): - action = np.zeros(len(ac_space.nvec)) - for index in range(len(ac_space.nvec)): - # filter action by the index - ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) - ifaceaction = ifaceaction % ac_space.nvec[0] - #print("ifaceaction at " + str(index) + " is " + str(ifaceaction)) - #print("Find " + str(index) + "in sorted indecies" + str(sortedIndecies)+ "at" + str(np.where(sortedIndecies == index))) - #action[np.where(sortedIndecies == index)[0]] = ifaceaction - action[sortedIndecies[index]] = ifaceaction - return action - -def eval(clients): - errorcounter_cli = 0 - errorcounter_ap = 0 - counter = 0 - - for client in clients: - ap = client['aps'] - state_cli = np.array([client['clients'], ap]) - #state_ap = np.array([ap, client['clients']]) - - state_cli = state_cli.transpose() - #state_ap = state_ap.transpose() - - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - - #state_ap_norm = normalize_state(state_ap.tolist(), ob_space, s_size) - #actionap = np.argmax(modelap.predict(state_ap_norm)[0]) - #actionvectorap = map_action(actionap) - - success_cli = False - for tmp in client['valid']: - tmpval = True - for a, b in zip(actionvector, tmp): - if a != b: - tmpval = False - break - if tmpval: - success_cli = True - break - - #success_ap = False - #for tmp in client['valid']: - # tmpval = True - # for a, b in zip(actionvectorap, tmp): - # if a != b: - # tmpval = False - # break - # if tmpval: - # success_ap = True - 
# break - - print("[Cli, Ap]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli)) - #print("[Ap, Cli]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(actionap) + ", Actionvector" + str(actionvectorap) + ", " + str(success_ap)) - counter += 1 - - #if not success_ap: - # errorcounter_ap +=1 - - if not success_cli: - errorcounter_cli +=1 - - print("Errors in [Cli,Ap]:" + str(errorcounter_cli) + "/" + str(counter) + "(" + str(errorcounter_cli/counter) + "%)") - #print("Errors in [Ap,Cli]:" + str(errorcounter_ap) + "/" + str(counter) + "(" + str(errorcounter_ap/counter) + "%)") - -def calculate_reward(clients_p_ap, action): - reward = 0 - - for ap in range(len(action)): - channel = action[ap] - - #search num aps on same channel - same_chan = 0 - for act in action: - if act == channel: - same_chan += 1 - - ap_bandwidth = BANDWITH_ON_CHANNEL/ same_chan - reward += clients_p_ap[ap] * sqrt(ap_bandwidth/clients_p_ap[ap]) - return reward - -def get_best_reward(client, ap): - state_cli = np.array([client, ap]) - #state_ap = np.array([ap, client['clients']]) - - state_cli = state_cli.transpose() - #state_ap = state_ap.transpose() - - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - - reward = calculate_reward(client, actionvector) - return reward - -def eval_handover(client, new_clients): - print("Current state:") - ap = client['aps'] - state_cli = np.array([client['clients'], ap]) - - state_cli = state_cli.transpose() - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - - success_cli = False - for tmp in client['valid']: - tmpval = True - for a, b in zip(actionvector, tmp): - if a != b: - tmpval = False - break - if tmpval: - success_cli = 
True - break - - reward = get_best_reward(client['clients'], ap) - - print("[Cli, Ap]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) - - print("Handover simulation") - for new_client in new_clients: - ap = new_client['aps'] - state_cli = np.array([new_client['clients'], ap]) - - state_cli = state_cli.transpose() - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - reward = calculate_reward(new_client['clients'], actionvector) - - success_cli = False - for tmp in new_client['valid']: - tmpval = True - for a, b in zip(actionvector, tmp): - if a != b: - tmpval = False - break - if tmpval: - success_cli = True - break - - print("[Cli, Ap]: Cli:" + str(new_client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) - -ac_space = spaces.MultiDiscrete([2,2,2]) -ob_space = spaces.Box(low=0, high=6, shape=(ac_space.nvec.shape[0],2), dtype=np.uint32) -#trainingfileap = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/controller_3_112neuronalesNetz.train" -trainingfile = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/training3_sort_cli-stap_3set_190706_small/logs/controller_3_varSetsmallneuronalesNetz.train" - -clients = [ {'clients': [1, 1, 5], 'aps': [2,2,2], 'valid':[[1,1,0], [0,0,1]]}, - {'clients': [1, 3, 2], 'aps': [2,2,2], 'valid':[[0,1,0], [1,0,1]]}, - {'clients': [5, 3, 4], 'aps': [2,2,2], 'valid':[[1,0,0], [0,1,1]]}, - {'clients': [5, 1, 3], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, - {'clients': [2, 4, 2], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, - {'clients': [7, 1, 5], 'aps': [1,2,1], 'valid':[[0,1,0], 
[1,0,1]]}, - {'clients': [4, 3, 2], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]}, - {'clients': [1, 3, 2], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]} - ] -#clients2 = [{'clients': [1, 1, 2], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [1, 2, 3], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [6, 0, 0], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [1, 5, 1], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [5, 1, 1], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [2, 2, 2], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [5, 5, 1], 'valid':[[1,0,1], [0,1,0]]} -# ] -handover = [{'clients': [1, 5, 1], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [2, 4, 1], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [1, 4, 2], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]} - ] - -handover2 = [{'clients': [1, 5, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [2, 4, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [1, 4, 2], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]} - ] - -#handover2 = [{'clients': [1, 2, 1], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [2, 1, 1], 'valid':[[0,1,1], [1,0,0]]}, -# {'clients': [1, 1, 2], 'valid':[[1,1,0], [0,0,1]]} -# ] - -handover3 = [{'clients': [2, 2, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0], [1,0,0], [0,1,1]]}, - {'clients': [2, 1, 2], 'aps': [1,2,1], 'valid':[[0,1,1], [1,0,0], [1,1,0], [0,0,1]]}, - {'clients': [1, 2, 2], 'aps': [1,2,1], 'valid':[[1,1,0], [0,0,1], [1,0,1], [0,1,0]]} - ] - -#aps = [[2,2,2]] -#aps2 = [[1,2,1]] - -#states = [[[1,2],[1,2],[2,2]], [[1,2],[2,2],[3,2]], [[6,2],[0,2],[0,2]], [[1,2],[5,2],[1,2]], [[2,2],[2,2],[2,2]], -# [[1,1],[1,0],[2,1]], [[1,1],[2,1],[3,0]], [[6,1],[0,0],[0,1]], [[1,1],[5,1],[1,0]], [[2,0],[2,1],[2,1]], -# [[2,1],[2,2],[2,1]], [[3,1],[2,2],[2,1]]] -#states = [[[2,1],[2,1],[2,2]], [[2,1],[2,2],[2,3]], [[2,6],[2,0],[2,0]], [[2,1],[2,5],[2,1]], [[2,2],[2,2],[2,2]], -# [[0,1],[1,1],[1,2]], [[1,1],[0,2],[1,3]], [[1,6],[1,0],[0,0]], 
[[1,1],[1,5],[0,1]], [[1,2],[0,2],[1,2]], -# [[1,2],[2,2],[1,2]], [[1,3],[2,2],[1,2]]] - -print("Observation space: ", ob_space, ob_space.dtype) -print("Action space: ", ac_space, ac_space.nvec) - -tmps_size = ob_space.shape -s_size = tmps_size[0] * tmps_size[1] -#s_size = list(map(lambda x: x * ob_space.high, s_size)) -a_size = pow(ac_space.nvec[0], ac_space.nvec.shape[0]) - -print("observation_space size:" + str(s_size)) -print("Data: Trained Data of different settings with sorting agent. Experiment 3") - -model = keras.Sequential() -model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) -model.add(keras.layers.Dense(5, activation='relu')) -model.add(keras.layers.Dense(a_size, activation='softmax')) -model.compile(optimizer=tf.train.AdamOptimizer(0.001), - loss='categorical_crossentropy', - metrics=['accuracy']) -model.load_weights(trainingfile) - -#modelap = keras.Sequential() -#modelap.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) -#modelap.add(keras.layers.Dense(5, activation='relu')) -#modelap.add(keras.layers.Dense(a_size, activation='softmax')) -#modelap.compile(optimizer=tf.train.AdamOptimizer(0.001), -# loss='categorical_crossentropy', -# metrics=['accuracy']) -#modelap.load_weights(trainingfileap) - -print("\nSame domain:") -eval(clients) - -#print("\nMan in the middle:") -#eval(clients2, aps2) - -print("\nHandover test") -eval_handover(handover[0], handover[1:]) - -print("\nHandover test 2") -eval_handover(handover2[0], handover2[1:]) - -print("\nHandover test 3") -eval_handover(handover3[0], handover3[1:]) diff --git a/openAI_RRM/rrm_agent_evalmodel4_difset_unsorted.py b/openAI_RRM/rrm_agent_evalmodel4_difset_unsorted.py deleted file mode 100644 index 0446cde..0000000 --- a/openAI_RRM/rrm_agent_evalmodel4_difset_unsorted.py +++ /dev/null @@ -1,287 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import gym -import UniFlexGym -import tensorflow as tf -import tensorflow.contrib.slim as 
slim -import numpy as np -from tensorflow import keras -import argparse -import logging -import time -import csv -import os -from math import * -from scipy.optimize import fsolve -from gym import spaces - -sortedIndecies = [] -ac_space = [] -BANDWITH_ON_CHANNEL = 54e6 -numChannels = 2 -SORT_VALUES = False - -def normalize_state(state, ob_space, s_size): - global sortedIndecies - state = np.array(state) - - #sort states - index = np.arange(state.shape[0]) - index = index.reshape((-1,1)) - state = np.concatenate((state, index), axis=1) - # - if SORT_VALUES: - state = np.sort(state.view('i8,i8,i8'), order=['f0', 'f1'], axis=0).view(np.int) - sortedIndecies = state[:,-1] - state = np.delete(state, -1, axis=1) - state = np.reshape(state, [1, s_size]) - obspacehigh = np.reshape(ob_space.high, [1, s_size]) - #state = state *2 / obspacehigh - 1 - state = state -1 - - return state - -def map_action(mappedAction): - action = np.zeros(len(ac_space.nvec)) - for index in range(len(ac_space.nvec)): - # filter action by the index - ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) - ifaceaction = ifaceaction % ac_space.nvec[0] - #print("ifaceaction at " + str(index) + " is " + str(ifaceaction)) - #print("Find " + str(index) + "in sorted indecies" + str(sortedIndecies)+ "at" + str(np.where(sortedIndecies == index))) - #action[np.where(sortedIndecies == index)[0]] = ifaceaction - action[sortedIndecies[index]] = ifaceaction - return action - -def eval(clients): - errorcounter_cli = 0 - errorcounter_ap = 0 - counter = 0 - - for client in clients: - ap = client['aps'] - state_cli = np.array([client['clients'], ap]) - #state_ap = np.array([ap, client['clients']]) - - state_cli = state_cli.transpose() - #state_ap = state_ap.transpose() - - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - - #state_ap_norm = normalize_state(state_ap.tolist(), ob_space, s_size) 
- #actionap = np.argmax(modelap.predict(state_ap_norm)[0]) - #actionvectorap = map_action(actionap) - - success_cli = False - for tmp in client['valid']: - tmpval = True - for a, b in zip(actionvector, tmp): - if a != b: - tmpval = False - break - if tmpval: - success_cli = True - break - - #success_ap = False - #for tmp in client['valid']: - # tmpval = True - # for a, b in zip(actionvectorap, tmp): - # if a != b: - # tmpval = False - # break - # if tmpval: - # success_ap = True - # break - - print("[Cli, Ap]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli)) - #print("[Ap, Cli]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(actionap) + ", Actionvector" + str(actionvectorap) + ", " + str(success_ap)) - counter += 1 - - #if not success_ap: - # errorcounter_ap +=1 - - if not success_cli: - errorcounter_cli +=1 - - print("Errors in [Cli,Ap]:" + str(errorcounter_cli) + "/" + str(counter) + "(" + str(errorcounter_cli/counter) + "%)") - #print("Errors in [Ap,Cli]:" + str(errorcounter_ap) + "/" + str(counter) + "(" + str(errorcounter_ap/counter) + "%)") - -def calculate_reward(clients_p_ap, action): - reward = 0 - - for ap in range(len(action)): - channel = action[ap] - - #search num aps on same channel - same_chan = 0 - for act in action: - if act == channel: - same_chan += 1 - - ap_bandwidth = BANDWITH_ON_CHANNEL/ same_chan - reward += clients_p_ap[ap] * sqrt(ap_bandwidth/clients_p_ap[ap]) - return reward - -def get_best_reward(client, ap): - state_cli = np.array([client, ap]) - #state_ap = np.array([ap, client['clients']]) - - state_cli = state_cli.transpose() - #state_ap = state_ap.transpose() - - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - - reward = calculate_reward(client, actionvector) - return reward - -def eval_handover(client, 
new_clients): - print("Current state:") - ap = client['aps'] - state_cli = np.array([client['clients'], ap]) - - state_cli = state_cli.transpose() - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - - success_cli = False - for tmp in client['valid']: - tmpval = True - for a, b in zip(actionvector, tmp): - if a != b: - tmpval = False - break - if tmpval: - success_cli = True - break - - reward = get_best_reward(client['clients'], ap) - - print("Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) - - print("Handover simulation") - for new_client in new_clients: - ap = new_client['aps'] - state_cli = np.array([new_client['clients'], ap]) - - state_cli = state_cli.transpose() - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - reward = calculate_reward(new_client['clients'], actionvector) - - success_cli = False - for tmp in new_client['valid']: - tmpval = True - for a, b in zip(actionvector, tmp): - if a != b: - tmpval = False - break - if tmpval: - success_cli = True - break - - print("Cli:" + str(new_client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) - -ac_space = spaces.MultiDiscrete([2,2,2]) -ob_space = spaces.Box(low=0, high=6, shape=(ac_space.nvec.shape[0],2), dtype=np.uint32) -#trainingfileap = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/controller_3_112neuronalesNetz.train" -trainingfile = 
"/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/training4_unsort_3set_2/logs/controller_3_varSetneuronalesNetz.train" - -clients = [ {'clients': [1, 1, 5], 'aps': [2,2,2], 'valid':[[1,1,0], [0,0,1]]}, - {'clients': [1, 3, 2], 'aps': [2,2,2], 'valid':[[0,1,0], [1,0,1]]}, - {'clients': [5, 3, 4], 'aps': [2,2,2], 'valid':[[1,0,0], [0,1,1]]}, - {'clients': [5, 1, 3], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, - {'clients': [2, 4, 2], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, - {'clients': [7, 1, 5], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, - {'clients': [4, 3, 2], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]}, - {'clients': [1, 3, 2], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]} - ] -#clients2 = [{'clients': [1, 1, 2], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [1, 2, 3], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [6, 0, 0], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [1, 5, 1], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [5, 1, 1], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [2, 2, 2], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [5, 5, 1], 'valid':[[1,0,1], [0,1,0]]} -# ] -handover = [{'clients': [1, 5, 1], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [2, 4, 1], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [1, 4, 2], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]} - ] - -handover2 = [{'clients': [1, 5, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [2, 4, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [1, 4, 2], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]} - ] - -#handover2 = [{'clients': [1, 2, 1], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [2, 1, 1], 'valid':[[0,1,1], [1,0,0]]}, -# {'clients': [1, 1, 2], 'valid':[[1,1,0], [0,0,1]]} -# ] - -handover3 = [{'clients': [2, 2, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0], [1,0,0], [0,1,1]]}, - {'clients': [2, 1, 2], 'aps': [1,2,1], 'valid':[[0,1,1], [1,0,0], [1,1,0], 
[0,0,1]]}, - {'clients': [1, 2, 2], 'aps': [1,2,1], 'valid':[[1,1,0], [0,0,1], [1,0,1], [0,1,0]]} - ] - -#aps = [[2,2,2]] -#aps2 = [[1,2,1]] - -#states = [[[1,2],[1,2],[2,2]], [[1,2],[2,2],[3,2]], [[6,2],[0,2],[0,2]], [[1,2],[5,2],[1,2]], [[2,2],[2,2],[2,2]], -# [[1,1],[1,0],[2,1]], [[1,1],[2,1],[3,0]], [[6,1],[0,0],[0,1]], [[1,1],[5,1],[1,0]], [[2,0],[2,1],[2,1]], -# [[2,1],[2,2],[2,1]], [[3,1],[2,2],[2,1]]] -#states = [[[2,1],[2,1],[2,2]], [[2,1],[2,2],[2,3]], [[2,6],[2,0],[2,0]], [[2,1],[2,5],[2,1]], [[2,2],[2,2],[2,2]], -# [[0,1],[1,1],[1,2]], [[1,1],[0,2],[1,3]], [[1,6],[1,0],[0,0]], [[1,1],[1,5],[0,1]], [[1,2],[0,2],[1,2]], -# [[1,2],[2,2],[1,2]], [[1,3],[2,2],[1,2]]] - -print("Observation space: ", ob_space, ob_space.dtype) -print("Action space: ", ac_space, ac_space.nvec) - -tmps_size = ob_space.shape -s_size = tmps_size[0] * tmps_size[1] -#s_size = list(map(lambda x: x * ob_space.high, s_size)) -a_size = pow(ac_space.nvec[0], ac_space.nvec.shape[0]) - -print("observation_space size:" + str(s_size)) - -print("Data: Trained Data of different settings with unsorted agent. 
Experiment 4") - -model = keras.Sequential() -model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) -model.add(keras.layers.Dense(5, activation='relu')) -model.add(keras.layers.Dense(a_size, activation='softmax')) -model.compile(optimizer=tf.train.AdamOptimizer(0.001), - loss='categorical_crossentropy', - metrics=['accuracy']) -model.load_weights(trainingfile) - -#modelap = keras.Sequential() -#modelap.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) -#modelap.add(keras.layers.Dense(5, activation='relu')) -#modelap.add(keras.layers.Dense(a_size, activation='softmax')) -#modelap.compile(optimizer=tf.train.AdamOptimizer(0.001), -# loss='categorical_crossentropy', -# metrics=['accuracy']) -#modelap.load_weights(trainingfileap) - -print("\nSame domain:") -eval(clients) - -#print("\nMan in the middle:") -#eval(clients2, aps2) - -print("\nHandover test") -eval_handover(handover[0], handover[1:]) - -print("\nHandover test 2") -eval_handover(handover2[0], handover2[1:]) - -print("\nHandover test 3") -eval_handover(handover3[0], handover3[1:]) diff --git a/openAI_RRM/rrm_agent_evalmodel5_difset_sort_cli-ap_detecttopo.py b/openAI_RRM/rrm_agent_evalmodel5_difset_sort_cli-ap_detecttopo.py deleted file mode 100644 index a643745..0000000 --- a/openAI_RRM/rrm_agent_evalmodel5_difset_sort_cli-ap_detecttopo.py +++ /dev/null @@ -1,290 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import gym -import UniFlexGym -import tensorflow as tf -import tensorflow.contrib.slim as slim -import numpy as np -from tensorflow import keras -import argparse -import logging -import time -import csv -import os -from math import * -from scipy.optimize import fsolve -from gym import spaces - -sortedIndecies = [] -ac_space = [] -BANDWITH_ON_CHANNEL = 54e6 -numChannels = 2 -SORT_VALUES = True - -def normalize_state(state, ob_space, s_size): - global sortedIndecies - state = np.array(state) - - #sort states - index = 
np.arange(state.shape[0]) - index = index.reshape((-1,1)) - state = np.concatenate((state, index), axis=1) - # - if SORT_VALUES: - state = np.sort(state.view('i8,i8,i8'), order=['f0', 'f1'], axis=0).view(np.int) - sortedIndecies = state[:,-1] - state = np.delete(state, -1, axis=1) - state = np.reshape(state, [1, s_size]) - obspacehigh = np.reshape(ob_space.high, [1, s_size]) - #state = state *2 / obspacehigh - 1 - state = state -1 - - return state - -def map_action(mappedAction): - action = np.zeros(len(ac_space.nvec)) - for index in range(len(ac_space.nvec)): - # filter action by the index - ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) - ifaceaction = ifaceaction % ac_space.nvec[0] - #print("ifaceaction at " + str(index) + " is " + str(ifaceaction)) - #print("Find " + str(index) + "in sorted indecies" + str(sortedIndecies)+ "at" + str(np.where(sortedIndecies == index))) - #action[np.where(sortedIndecies == index)[0]] = ifaceaction - action[sortedIndecies[index]] = ifaceaction - return action - -def eval(clients): - errorcounter_cli = 0 - errorcounter_ap = 0 - counter = 0 - - for client in clients: - ap = client['aps'] - state_cli = np.array([client['clients'], ap]) - #state_ap = np.array([ap, client['clients']]) - - state_cli = state_cli.transpose() - #state_ap = state_ap.transpose() - - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - - #state_ap_norm = normalize_state(state_ap.tolist(), ob_space, s_size) - #actionap = np.argmax(modelap.predict(state_ap_norm)[0]) - #actionvectorap = map_action(actionap) - - success_cli = False - for tmp in client['valid']: - tmpval = True - for a, b in zip(actionvector, tmp): - if a != b: - tmpval = False - break - if tmpval: - success_cli = True - break - - #success_ap = False - #for tmp in client['valid']: - # tmpval = True - # for a, b in zip(actionvectorap, tmp): - # if a != b: - # tmpval = 
False - # break - # if tmpval: - # success_ap = True - # break - - print("[Cli, Ap]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli)) - #print("[Ap, Cli]: Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(actionap) + ", Actionvector" + str(actionvectorap) + ", " + str(success_ap)) - counter += 1 - - #if not success_ap: - # errorcounter_ap +=1 - - if not success_cli: - errorcounter_cli +=1 - - print("Errors in [Cli,Ap]:" + str(errorcounter_cli) + "/" + str(counter) + "(" + str(errorcounter_cli/counter) + "%)") - #print("Errors in [Ap,Cli]:" + str(errorcounter_ap) + "/" + str(counter) + "(" + str(errorcounter_ap/counter) + "%)") - -def calculate_reward(clients_p_ap, action): - reward = 0 - - for ap in range(len(action)): - channel = action[ap] - - #search num aps on same channel - same_chan = 0 - for act in action: - if act == channel: - same_chan += 1 - - ap_bandwidth = BANDWITH_ON_CHANNEL/ same_chan - reward += clients_p_ap[ap] * sqrt(ap_bandwidth/clients_p_ap[ap]) - return reward - -def get_best_reward(client, ap): - state_cli = np.array([client, ap]) - #state_ap = np.array([ap, client['clients']]) - - state_cli = state_cli.transpose() - #state_ap = state_ap.transpose() - - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - - reward = calculate_reward(client, actionvector) - return reward - -def eval_handover(client, new_clients): - print("Current state:") - ap = client['aps'] - state_cli = np.array([client['clients'], ap]) - - state_cli = state_cli.transpose() - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - - success_cli = False - for tmp in client['valid']: - tmpval = True - for a, b in zip(actionvector, tmp): - if a != b: 
- tmpval = False - break - if tmpval: - success_cli = True - break - - reward = get_best_reward(client['clients'], ap) - - print("Cli:" + str(client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) - - print("Handover simulation") - for new_client in new_clients: - ap = new_client['aps'] - state_cli = np.array([new_client['clients'], ap]) - - state_cli = state_cli.transpose() - state_cli_norm = normalize_state(state_cli.tolist(), ob_space, s_size) - action = np.argmax(model.predict(state_cli_norm)[0]) - actionvector = map_action(action) - reward = calculate_reward(new_client['clients'], actionvector) - - success_cli = False - for tmp in new_client['valid']: - tmpval = True - for a, b in zip(actionvector, tmp): - if a != b: - tmpval = False - break - if tmpval: - success_cli = True - break - - print("Cli:" + str(new_client['clients']) + ", AP:" + str(ap) + ", Action:" +str(action) + ", Actionvector" + str(actionvector) + ", " + str(success_cli) + ", reward:" + str(reward)) - - -ac_space = spaces.MultiDiscrete([2,2,2]) -ob_space = spaces.Box(low=0, high=6, shape=(ac_space.nvec.shape[0],2), dtype=np.uint32) -#trainingfileap = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/test/logs/controller_3_112neuronalesNetz.train" -trainingfile = "/home/sascha/tu-cloud/Uni/Module/Bachelorarbeit_TI/Messungsautomatisierung/simulationMeasurements_2/Training_120_2_80/logs/controller_3_varSetneuronalesNetz.train" - -clients = [ {'clients': [1, 1, 2], 'aps': [2,2,2], 'valid':[[1,1,0], [0,0,1]]}, - {'clients': [1, 1, 1], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, - {'clients': [1, 1, 5], 'aps': [2,2,2], 'valid':[[1,1,0], [0,0,1]]}, - {'clients': [1, 3, 2], 'aps': [2,2,2], 'valid':[[0,1,0], [1,0,1]]}, - {'clients': [5, 3, 4], 'aps': [2,2,2], 'valid':[[1,0,0], [0,1,1]]}, - {'clients': [5, 1, 3], 'aps': [1,2,1], 'valid':[[0,1,0], 
[1,0,1]]}, - {'clients': [2, 4, 2], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, - {'clients': [7, 1, 5], 'aps': [1,2,1], 'valid':[[0,1,0], [1,0,1]]}, - {'clients': [4, 3, 2], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]}, - {'clients': [1, 1, 1], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]}, - {'clients': [1, 3, 2], 'aps': [1,0,1], 'valid':[[0,1,1], [0,0,1], [1,1,0], [1,0,0]]} - ] -#clients2 = [{'clients': [1, 1, 2], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [1, 2, 3], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [6, 0, 0], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [1, 5, 1], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [5, 1, 1], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [2, 2, 2], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [5, 5, 1], 'valid':[[1,0,1], [0,1,0]]} -# ] -handover = [{'clients': [1, 5, 1], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [2, 4, 1], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [1, 4, 2], 'aps': [2,2,2], 'valid':[[1,0,1], [0,1,0]]} - ] - -handover2 = [{'clients': [1, 5, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [2, 4, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]}, - {'clients': [1, 4, 2], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0]]} - ] - -#handover2 = [{'clients': [1, 2, 1], 'valid':[[1,0,1], [0,1,0]]}, -# {'clients': [2, 1, 1], 'valid':[[0,1,1], [1,0,0]]}, -# {'clients': [1, 1, 2], 'valid':[[1,1,0], [0,0,1]]} -# ] - -handover3 = [{'clients': [2, 2, 1], 'aps': [1,2,1], 'valid':[[1,0,1], [0,1,0], [1,0,0], [0,1,1]]}, - {'clients': [2, 1, 2], 'aps': [1,2,1], 'valid':[[0,1,1], [1,0,0], [1,1,0], [0,0,1]]}, - {'clients': [1, 2, 2], 'aps': [1,2,1], 'valid':[[1,1,0], [0,0,1], [1,0,1], [0,1,0]]} - ] - -#aps = [[2,2,2]] -#aps2 = [[1,2,1]] - -#states = [[[1,2],[1,2],[2,2]], [[1,2],[2,2],[3,2]], [[6,2],[0,2],[0,2]], [[1,2],[5,2],[1,2]], [[2,2],[2,2],[2,2]], -# [[1,1],[1,0],[2,1]], [[1,1],[2,1],[3,0]], [[6,1],[0,0],[0,1]], [[1,1],[5,1],[1,0]], [[2,0],[2,1],[2,1]], 
-# [[2,1],[2,2],[2,1]], [[3,1],[2,2],[2,1]]] -#states = [[[2,1],[2,1],[2,2]], [[2,1],[2,2],[2,3]], [[2,6],[2,0],[2,0]], [[2,1],[2,5],[2,1]], [[2,2],[2,2],[2,2]], -# [[0,1],[1,1],[1,2]], [[1,1],[0,2],[1,3]], [[1,6],[1,0],[0,0]], [[1,1],[1,5],[0,1]], [[1,2],[0,2],[1,2]], -# [[1,2],[2,2],[1,2]], [[1,3],[2,2],[1,2]]] - -print("Observation space: ", ob_space, ob_space.dtype) -print("Action space: ", ac_space, ac_space.nvec) - -tmps_size = ob_space.shape -s_size = tmps_size[0] * tmps_size[1] -#s_size = list(map(lambda x: x * ob_space.high, s_size)) -a_size = pow(ac_space.nvec[0], ac_space.nvec.shape[0]) - -print("observation_space size:" + str(s_size)) -print("Data: Trained Data of different settings with sorting agent. Experiment 3") - -model = keras.Sequential() -model.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='relu')) -#model.add(keras.layers.Dense(5, activation='relu')) -model.add(keras.layers.Dense(a_size, activation='softmax')) -model.compile(optimizer=tf.train.AdamOptimizer(0.001), - loss='categorical_crossentropy', - metrics=['accuracy']) -model.load_weights(trainingfile) - -#modelap = keras.Sequential() -#modelap.add(keras.layers.Dense(s_size, input_shape=(s_size,), activation='sigmoid')) -#modelap.add(keras.layers.Dense(5, activation='relu')) -#modelap.add(keras.layers.Dense(a_size, activation='softmax')) -#modelap.compile(optimizer=tf.train.AdamOptimizer(0.001), -# loss='categorical_crossentropy', -# metrics=['accuracy']) -#modelap.load_weights(trainingfileap) - -print("\nSame domain:") -eval(clients) - -#print("\nMan in the middle:") -#eval(clients2, aps2) - -print("\nHandover test") -eval_handover(handover[0], handover[1:]) - -print("\nHandover test 2") -eval_handover(handover2[0], handover2[1:]) - -print("\nHandover test 3") -eval_handover(handover3[0], handover3[1:]) diff --git a/openAI_RRM/thompson_agent.py b/openAI_RRM/thompson_agent.py index b449952..0ecfdcd 100644 --- a/openAI_RRM/thompson_agent.py +++ 
b/openAI_RRM/thompson_agent.py @@ -99,8 +99,8 @@ def map_action(mappedAction): next_state, reward, done, _ = env.step(actionVector) #hysteresis - #if action != lastaction and abs(reward - lastreward) < 0.1: - # reward = reward * 0.75 + if action != lastaction and abs(reward - lastreward) < 0.1: + reward = reward * 0.75 lastaction = action lastreward = reward diff --git a/openAI_RRM/thompson_agent_hysteresis_schnell.py b/openAI_RRM/thompson_agent2.py similarity index 100% rename from openAI_RRM/thompson_agent_hysteresis_schnell.py rename to openAI_RRM/thompson_agent2.py diff --git a/openAI_RRM/thompson_agent_hysteresis.py b/openAI_RRM/thompson_agent_hysteresis.py deleted file mode 100644 index 0ecfdcd..0000000 --- a/openAI_RRM/thompson_agent_hysteresis.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import gym -import UniFlexGym -#import tensorflow as tf -#import tensorflow.contrib.slim as slim -import numpy as np -#from tensorflow import keras -import argparse -import logging -import time -import csv -import os -from math import * - - -parser = argparse.ArgumentParser(description='Uniflex reader') -parser.add_argument('--config', help='path to the uniflex config file', default=None) -parser.add_argument('--output', help='path to a csv file for agent output data', default=None) -parser.add_argument('--plot', help='activate plotting', default=None) -parser.add_argument('--steptime', help='interval between two steps', default=1) -parser.add_argument('--steps', help='number of steps in this execution. If not set, the agents runs infinitly long', default=None) - -args = parser.parse_args() -if not args.config: - print("No config file specified!") - os._exit(1) -if not args.output: - print("No output file specified! 
- Skip data") - -if args.plot: - import matplotlib.pyplot as plt - -ac_space = [] - -def map_action(mappedAction): - action = np.zeros(len(ac_space.nvec)) - for index in range(len(ac_space.nvec)): - # filter action by the index - ifaceaction = int(mappedAction / (pow(ac_space.nvec[0] ,index))) - ifaceaction = ifaceaction % ac_space.nvec[0] - action[index] = ifaceaction - return action - - -#create uniflex environment -env = gym.make('uniflex-v0') -#env.configure() -env.start_controller(steptime=float(args.steptime), config=args.config) - -numChannels = 2 -episode = 1 - -while True: - run = 0 - - state = env.reset() - n = 0 - ac_space = env.action_space - ob_space = env.observation_space - print("Observation space: ", ob_space, ob_space.dtype) - print("Action space: ", ac_space, ac_space.nvec) - - a_size = 0 - if len(ac_space.nvec) > 0: - a_size = int(pow(ac_space.nvec[0], ac_space.nvec.shape[0])) - - avg = [] - num = [] - maxreward = 1 - lastreward = 0 - lastaction = 0 - - done = False - - if a_size == 0: - print("there is no vaild AP - sleep 10 seconds") - time.sleep(2) - continue - - aps = int(log(a_size, numChannels)) - - for i in range(a_size): - avg.append(0) - num.append(0) - - while not done: - # generate random values - randval = [] - for i in range(a_size): - randval.append(np.random.normal(avg[i]/maxreward, 1/(pow(num[i],1) + 1), 1)) - - # take index of highest value - action = np.argmax(randval) - - #execute step - actionVector = map_action(action) - next_state, reward, done, _ = env.step(actionVector) - - #hysteresis - if action != lastaction and abs(reward - lastreward) < 0.1: - reward = reward * 0.75 - lastaction = action - lastreward = reward - - # add reward for further execution - avg[action] = (avg[action] * num[action] + reward) / (num[action] + 2) - num[action] += 1 - - maxreward = np.maximum(maxreward, reward) - - # statistics - if args.output: - with open(args.output, 'a') as csvFile: - writer = csv.writer(csvFile) - writer.writerow([reward, 
action, episode]) - csvFile.close() - - print ("Reward: " + str(reward)) - print ("GameOver: " + str(done)) - print ("Next Channels: " + str(next_state)) - print ("Channel selection:" + str(action)) - print ("Average:" + str(avg)) - print ("next step") - - if args.plot: - plt.subplot(211) - plt.plot(run, reward, 'bo') # Additional point - plt.ylabel('reward') - plt.subplot(212) - plt.plot(run, action, 'bo') # Additional point - plt.ylabel('action') - plt.xlabel('step') - plt.pause(0.05) - - run += 1 - - if args.steps and int(args.steps) <= run: - os._exit(1) - - episode += 1 From 245ccb8edc55cec392411a1730f8bd9a06e089b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Sat, 2 Nov 2019 21:07:22 +0100 Subject: [PATCH 53/54] set config required --- openAI_RRM/rl_agent.py | 2 +- openAI_RRM/rl_agent_multi.py | 2 +- openAI_RRM/thompson_agent.py | 2 +- openAI_RRM/thompson_agent2.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/openAI_RRM/rl_agent.py b/openAI_RRM/rl_agent.py index fb39828..9f5c9e6 100644 --- a/openAI_RRM/rl_agent.py +++ b/openAI_RRM/rl_agent.py @@ -114,7 +114,7 @@ def normalize_reward(reward, rewardpow, action): return reward parser = argparse.ArgumentParser(description='Uniflex reader') -parser.add_argument('--config', help='path to the uniflex config file', default=None) +parser.add_argument('--config', help='path to the uniflex config file', default=None, required=True) parser.add_argument('--output', help='path to a csv file for agent output data', default=None) parser.add_argument('--plot', help='activate plotting', default=None) parser.add_argument('--steptime', help='interval between two steps', default=1) diff --git a/openAI_RRM/rl_agent_multi.py b/openAI_RRM/rl_agent_multi.py index bc1b4e9..d6ebdae 100644 --- a/openAI_RRM/rl_agent_multi.py +++ b/openAI_RRM/rl_agent_multi.py @@ -123,7 +123,7 @@ def normalize_reward(reward, rewardpow, action): parser = argparse.ArgumentParser(description='Uniflex reader') 
-parser.add_argument('--config', help='path to the uniflex config file', default=None) +parser.add_argument('--config', help='path to the uniflex config file', default=None, required=True) parser.add_argument('--output', help='path to a csv file for agent output data', default=None) parser.add_argument('--plot', help='activate plotting', default=None) parser.add_argument('--steptime', help='interval between two steps', default=1) diff --git a/openAI_RRM/thompson_agent.py b/openAI_RRM/thompson_agent.py index 0ecfdcd..05fa76e 100644 --- a/openAI_RRM/thompson_agent.py +++ b/openAI_RRM/thompson_agent.py @@ -16,7 +16,7 @@ parser = argparse.ArgumentParser(description='Uniflex reader') -parser.add_argument('--config', help='path to the uniflex config file', default=None) +parser.add_argument('--config', help='path to the uniflex config file', default=None, required=True) parser.add_argument('--output', help='path to a csv file for agent output data', default=None) parser.add_argument('--plot', help='activate plotting', default=None) parser.add_argument('--steptime', help='interval between two steps', default=1) diff --git a/openAI_RRM/thompson_agent2.py b/openAI_RRM/thompson_agent2.py index ce3d508..42c91a7 100644 --- a/openAI_RRM/thompson_agent2.py +++ b/openAI_RRM/thompson_agent2.py @@ -16,7 +16,7 @@ parser = argparse.ArgumentParser(description='Uniflex reader') -parser.add_argument('--config', help='path to the uniflex config file', default=None) +parser.add_argument('--config', help='path to the uniflex config file', default=None, required=True) parser.add_argument('--output', help='path to a csv file for agent output data', default=None) parser.add_argument('--plot', help='activate plotting', default=None) parser.add_argument('--steptime', help='interval between two steps', default=1) From 7a0c161d66e33e4bf19571edb5db46e00adaba68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sascha=20R=C3=B6sler?= Date: Sat, 2 Nov 2019 21:17:16 +0100 Subject: [PATCH 54/54] fix readme --- 
openAI_RRM/SimulationSlavesConfig/Setting2_2/readme.txt | 3 ++- .../SimulationSlavesConfig/Setting2_2unsym/readme.txt | 3 ++- openAI_RRM/SimulationSlavesConfig/Setting3_112/readme.txt | 3 ++- openAI_RRM/SimulationSlavesConfig/Setting3_222/readme.txt | 3 ++- .../SimulationSlavesConfig/Setting3_varSet/readme.txt | 2 +- .../SimulationSlavesConfig/Setting8_22213122/readme.txt | 3 ++- openAI_RRM/readme.txt | 6 ++++-- 7 files changed, 15 insertions(+), 8 deletions(-) diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2/readme.txt b/openAI_RRM/SimulationSlavesConfig/Setting2_2/readme.txt index bc913c7..6522f4e 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2/readme.txt +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2/readme.txt @@ -4,7 +4,8 @@ source ~/Uniflex/dev/bin/activate # 2a. Run control program in master node: uniflex-broker # 2b. Run control program in master node: -python3 rrm_agent.py --config ./config_master_simulation.yaml +python3 rl_agent.py --config ./config_master_simulation.yaml +# you can choose thompson_agent.py or thompson_agent2.py, too # 2c. Run modules in slave node: #Simulation diff --git a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/readme.txt b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/readme.txt index bc913c7..6522f4e 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/readme.txt +++ b/openAI_RRM/SimulationSlavesConfig/Setting2_2unsym/readme.txt @@ -4,7 +4,8 @@ source ~/Uniflex/dev/bin/activate # 2a. Run control program in master node: uniflex-broker # 2b. Run control program in master node: -python3 rrm_agent.py --config ./config_master_simulation.yaml +python3 rl_agent.py --config ./config_master_simulation.yaml +# you can choose thompson_agent.py or thompson_agent2.py, too # 2c. 
Run modules in slave node: #Simulation diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_112/readme.txt b/openAI_RRM/SimulationSlavesConfig/Setting3_112/readme.txt index c1d3666..4cb054e 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_112/readme.txt +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_112/readme.txt @@ -4,7 +4,8 @@ source ~/Uniflex/dev/bin/activate # 2a. Run control program in master node: uniflex-broker # 2b. Run control program in master node: -python3 rrm_agent.py --config ./config_master_simulation.yaml +python3 rl_agent.py --config ./config_master_simulation.yaml +# you can choose thompson_agent.py or thompson_agent2.py, too # 2c. Run modules in slave node: #Simulation diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_222/readme.txt b/openAI_RRM/SimulationSlavesConfig/Setting3_222/readme.txt index c1d3666..4cb054e 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_222/readme.txt +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_222/readme.txt @@ -4,7 +4,8 @@ source ~/Uniflex/dev/bin/activate # 2a. Run control program in master node: uniflex-broker # 2b. Run control program in master node: -python3 rrm_agent.py --config ./config_master_simulation.yaml +python3 rl_agent.py --config ./config_master_simulation.yaml +# you can choose thompson_agent.py or thompson_agent2.py, too # 2c. Run modules in slave node: #Simulation diff --git a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/readme.txt b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/readme.txt index c1d3666..5b6ee7b 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/readme.txt +++ b/openAI_RRM/SimulationSlavesConfig/Setting3_varSet/readme.txt @@ -4,7 +4,7 @@ source ~/Uniflex/dev/bin/activate # 2a. Run control program in master node: uniflex-broker # 2b. Run control program in master node: -python3 rrm_agent.py --config ./config_master_simulation.yaml +python3 rl_agent_multi.py --config ./config_master_simulation.yaml # 2c. 
Run modules in slave node: #Simulation diff --git a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/readme.txt b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/readme.txt index c0734b5..1f1c50f 100644 --- a/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/readme.txt +++ b/openAI_RRM/SimulationSlavesConfig/Setting8_22213122/readme.txt @@ -4,7 +4,8 @@ source ~/Uniflex/dev/bin/activate # 2a. Run control program in master node: uniflex-broker # 2b. Run control program in master node: -python3 rrm_agent.py --config ./config_master_simulation.yaml +python3 rl_agent.py --config ./config_master_simulation.yaml +# you can choose thompson_agent.py or thompson_agent2.py, too # 2c. Run modules in slave node: #Simulation diff --git a/openAI_RRM/readme.txt b/openAI_RRM/readme.txt index 61b5e23..e8944b0 100644 --- a/openAI_RRM/readme.txt +++ b/openAI_RRM/readme.txt @@ -4,8 +4,10 @@ source ~/Uniflex/dev/bin/activate # 2a. Run control program in master node: uniflex-broker # 2b. Run control program in master node: -python3 rrm_agent.py --config ./config_master.yaml -python3 rrm_agent.py --config ./config_master_simulation.yaml +python3 rl_agent.py --config ./config_master.yaml +# or +python3 rl_agent.py --config ./config_master_simulation.yaml +# you can choose rl_agent_multi.py, thompson_agent.py or thompson_agent2.py, too # 2c. Run modules in slave node: #Linux WiFi AP