From c08d8bbbd752f3ff450be961c85f545a20428b6e Mon Sep 17 00:00:00 2001 From: "Gu,YingX" Date: Mon, 27 Apr 2020 10:17:43 +0800 Subject: [PATCH 01/91] Update the script for reslove the config files Signed-off-by: Gu,YingX --- .gitignore | 5 + CMakeLists.txt | 8 + deployment/kubernetes/CMakeLists.txt | 1 + deployment/kubernetes/build.sh | 66 +++++ .../kubernetes/cdn-service-deployment.yaml.m4 | 35 +++ .../kubernetes/cdn-service-service.yaml.m4 | 17 ++ .../kafka-service-deployment.yaml.m4 | 61 ++++ .../kubernetes/kafka-service-service.yaml.m4 | 14 + .../live-service-deployment.yaml.m4 | 28 ++ .../redis-service-deployment.yaml.m4 | 31 +++ .../kubernetes/redis-service-service.yaml.m4 | 13 + deployment/kubernetes/run_with_command.py | 260 +++++------------- deployment/kubernetes/start.sh | 25 +- deployment/kubernetes/stop.sh | 3 +- deployment/kubernetes/update_yaml.py | 31 ++- .../kubernetes/vod-service-deployment.yaml.m4 | 32 +++ deployment/kubernetes/yaml_utils.py | 7 +- .../zookeeper-service-deployment.yaml.m4 | 44 +++ .../zookeeper-service-service.yaml.m4 | 14 + script/deployment.cmake | 3 +- script/service.cmake | 2 +- 21 files changed, 475 insertions(+), 225 deletions(-) create mode 100755 deployment/kubernetes/build.sh create mode 100644 deployment/kubernetes/cdn-service-deployment.yaml.m4 create mode 100644 deployment/kubernetes/cdn-service-service.yaml.m4 create mode 100644 deployment/kubernetes/kafka-service-deployment.yaml.m4 create mode 100644 deployment/kubernetes/kafka-service-service.yaml.m4 create mode 100644 deployment/kubernetes/live-service-deployment.yaml.m4 create mode 100644 deployment/kubernetes/redis-service-deployment.yaml.m4 create mode 100644 deployment/kubernetes/redis-service-service.yaml.m4 create mode 100644 deployment/kubernetes/vod-service-deployment.yaml.m4 create mode 100644 deployment/kubernetes/zookeeper-service-deployment.yaml.m4 create mode 100644 deployment/kubernetes/zookeeper-service-service.yaml.m4 diff --git a/.gitignore 
b/.gitignore index 882027a..f503fa5 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,8 @@ volume/video/* deployment/docker-swarm/dhparam.pem deployment/docker-swarm/self.crt deployment/docker-swarm/self.key +deployment/kubernetes/*.yaml +deployment/kubernetes/*.cfg +deployment/kubernetes/__pycache__/ +deployment/certificate/self.crt +deployment/certificate/self.key diff --git a/CMakeLists.txt b/CMakeLists.txt index 3765883..48821d0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,6 +2,13 @@ cmake_minimum_required (VERSION 2.8) Project(OVC NONE) +if (NOT DEFINED NVODS) + set(NVODS "2") +endif() +if (NOT DEFINED NLIVES) + set(NLIVES "1") +endif() + file(GLOB dirs "deployment" "*") list(REMOVE_DUPLICATES dirs) foreach(dir ${dirs}) @@ -12,3 +19,4 @@ endforeach() # legal message execute_process(COMMAND printf "\nThis script will build third party components licensed under various open source licenses into your container images. The terms under which those components may be used and distributed can be found with the license document that is provided with those components. 
Please familiarize yourself with those terms to ensure your distribution of those components complies with the terms of those licenses.\n\n") +execute_process(COMMAND printf "\n-- Setting: NVODS=${NVODS}, NLIVES=${NLIVES}\n") diff --git a/deployment/kubernetes/CMakeLists.txt b/deployment/kubernetes/CMakeLists.txt index e85ab85..a0b76ba 100644 --- a/deployment/kubernetes/CMakeLists.txt +++ b/deployment/kubernetes/CMakeLists.txt @@ -1,2 +1,3 @@ set(service "kubernetes") +include("${CMAKE_SOURCE_DIR}/script/service.cmake") include("${CMAKE_SOURCE_DIR}/script/deployment.cmake") diff --git a/deployment/kubernetes/build.sh b/deployment/kubernetes/build.sh new file mode 100755 index 0000000..3557f8a --- /dev/null +++ b/deployment/kubernetes/build.sh @@ -0,0 +1,66 @@ +#!/bin/bash -e + +DIR=$(dirname $(readlink -f "$0")) +NVODS="${1:-1}" +NLIVES="${2:-1}" + +echo "Generating templates with NVODS=${NVODS}, NLIVES=${NLIVES}" + +find "${DIR}" -maxdepth 1 -name "*.yaml" -exec rm -rf "{}" \; +find "${DIR}" -maxdepth 1 -name "*.cfg" -exec rm -rf "{}" \; + +for template in $(find "${DIR}" -maxdepth 1 -name "*.yaml.m4" -print); do + yaml=${template/.m4/} + m4 -DNVODS=${NVODS} -I "${DIR}" "${template}" > "${yaml}" +done + +cat <> ${DIR}/cpu_mem_managerment.cfg +[cdn] +cpu = 2 +mem = 2000 +[redis] +cpu = 0.5 +mem = 500 +[zookeeper] +cpu = 1 +mem = 500 +[kafka] +cpu = 1 +mem = 500 +EOF + +for ((VODIDX=0;VODIDX<${NVODS};VODIDX++)); do + cat <> ${DIR}/cpu_mem_managerment.cfg +[vod${VODIDX}] +cpu = 1.5 +mem = 3000 +EOF + cat <> ${DIR}/transcode.cfg +[vod${VODIDX}] +hwaccel = false +EOF +done + +for ((LIVEIDX=0;LIVEIDX<${NLIVES};LIVEIDX++)); do + cat <> ${DIR}/cpu_mem_managerment.cfg +[live${LIVEIDX}] +cpu = 1.5 +mem = 3000 +EOF + cat <> ${DIR}/transcode.cfg +[live${LIVEIDX}] +url = bbb_sunflower_1080p_30fps_normal.mp4 +width_height = 856x480 +bitrate = 8000000 +framerate = 25 +gop = 100 +maxbFrames = 2 +refsNum = 2 +rcMode = 0 +preset = veryfast +encoder_type = AVC +protocol = HLS 
+hwaccel = false +density = 2 +EOF +done diff --git a/deployment/kubernetes/cdn-service-deployment.yaml.m4 b/deployment/kubernetes/cdn-service-deployment.yaml.m4 new file mode 100644 index 0000000..8e54731 --- /dev/null +++ b/deployment/kubernetes/cdn-service-deployment.yaml.m4 @@ -0,0 +1,35 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: cdn-service + name: cdn-service +spec: + replicas: 1 + template: + metadata: + creationTimestamp: null + labels: + app: cdn-service + spec: + containers: + - args: + - bash + - -c + - /home/main.py&/usr/local/sbin/nginx + image: ovc_cdn_service:latest + imagePullPolicy: IfNotPresent + name: cdn-service + ports: + - containerPort: 8080 + - containerPort: 1935 + resources: + limits: + cpu: "3" + memory: 3145728e3 + requests: + cpu: 1500m + memory: 1572864e3 + nodeSelector: + kubernetes.io/hostname: master.machine + restartPolicy: Always diff --git a/deployment/kubernetes/cdn-service-service.yaml.m4 b/deployment/kubernetes/cdn-service-service.yaml.m4 new file mode 100644 index 0000000..14ee3aa --- /dev/null +++ b/deployment/kubernetes/cdn-service-service.yaml.m4 @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: cdn-service + name: cdn-service +spec: + ports: + - name: "8080" + port: 8080 + targetPort: 8080 + - name: "1935" + port: 1935 + targetPort: 1935 + selector: + app: cdn-service + type: NodePort diff --git a/deployment/kubernetes/kafka-service-deployment.yaml.m4 b/deployment/kubernetes/kafka-service-deployment.yaml.m4 new file mode 100644 index 0000000..9b3e8bf --- /dev/null +++ b/deployment/kubernetes/kafka-service-deployment.yaml.m4 @@ -0,0 +1,61 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: kafka-service + name: kafka-service +spec: + replicas: 1 + template: + metadata: + labels: + app: kafka-service + spec: + containers: + - env: + - name: KAFKA_ADVERTISED_HOST_NAME + value: kafka-service + - name: KAFKA_ADVERTISED_LISTENERS + 
value: PLAINTEXT://kafka-service:9092 + - name: KAFKA_ADVERTISED_PORT + value: "9092" + - name: KAFKA_AUTO_CREATE_TOPICS_ENABLE + value: "true" + - name: KAFKA_BROKER_ID + value: "1" + - name: KAFKA_CREATE_TOPICS + value: content_provider_sched:16:1 + - name: KAFKA_DEFAULT_REPLICATION_FACTOR + value: "1" + - name: KAFKA_HEAP_OPTS + value: -Xmx1024m -Xms1024m + - name: KAFKA_INTER_BROKER_LISTENER_NAME + value: PLAINTEXT + - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP + value: PLAINTEXT:PLAINTEXT + - name: KAFKA_LOG4J_LOGGERS + value: kafka=ERROR,kafka.controller=ERROR,state.change.logger=ERROR,org.apache.kafka=ERROR + - name: KAFKA_LOG4J_ROOT_LOGLEVEL + value: ERROR + - name: KAFKA_LOG_RETENTION_HOURS + value: "8" + - name: KAFKA_NUM_PARTITIONS + value: "16" + - name: KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR + value: "1" + - name: KAFKA_ZOOKEEPER_CONNECT + value: zookeeper-service:2181 + image: wurstmeister/kafka:latest + name: kafka-service + ports: + - containerPort: 9092 + resources: + limits: + cpu: "2" + memory: 1048576e3 + requests: + cpu: "1" + memory: 524288e3 + nodeSelector: + kubernetes.io/hostname: master.machine + restartPolicy: Always diff --git a/deployment/kubernetes/kafka-service-service.yaml.m4 b/deployment/kubernetes/kafka-service-service.yaml.m4 new file mode 100644 index 0000000..a729a70 --- /dev/null +++ b/deployment/kubernetes/kafka-service-service.yaml.m4 @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: kafka-service + name: kafka-service +spec: + ports: + - name: "9092" + port: 9092 + targetPort: 9092 + selector: + app: kafka-service + type: NodePort diff --git a/deployment/kubernetes/live-service-deployment.yaml.m4 b/deployment/kubernetes/live-service-deployment.yaml.m4 new file mode 100644 index 0000000..cb3ed88 --- /dev/null +++ b/deployment/kubernetes/live-service-deployment.yaml.m4 @@ -0,0 +1,28 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: live-service + name: live-service 
+spec: + replicas: 1 + template: + metadata: + creationTimestamp: null + labels: + app: live-service + spec: + containers: + - image: ovc_software_transcode_service:latest + imagePullPolicy: IfNotPresent + name: live-service + resources: + limits: + cpu: "6" + memory: 6291456e3 + requests: + cpu: "3" + memory: 3145728e3 + nodeSelector: + kubernetes.io/hostname: master.machine + restartPolicy: Always diff --git a/deployment/kubernetes/redis-service-deployment.yaml.m4 b/deployment/kubernetes/redis-service-deployment.yaml.m4 new file mode 100644 index 0000000..9559267 --- /dev/null +++ b/deployment/kubernetes/redis-service-deployment.yaml.m4 @@ -0,0 +1,31 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: redis-service + name: redis-service +spec: + replicas: 1 + template: + metadata: + creationTimestamp: null + labels: + app: redis-service + spec: + containers: + - args: + - redis-server + image: redis:latest + name: redis-service + ports: + - containerPort: 6379 + resources: + limits: + cpu: "2" + memory: 1048576e3 + requests: + cpu: "1" + memory: 524288e3 + nodeSelector: + kubernetes.io/hostname: master.machine + restartPolicy: Always diff --git a/deployment/kubernetes/redis-service-service.yaml.m4 b/deployment/kubernetes/redis-service-service.yaml.m4 new file mode 100644 index 0000000..22bd6ad --- /dev/null +++ b/deployment/kubernetes/redis-service-service.yaml.m4 @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: redis-service + name: redis-service +spec: + ports: + - name: "6379" + port: 6379 + targetPort: 6379 + selector: + app: redis-service diff --git a/deployment/kubernetes/run_with_command.py b/deployment/kubernetes/run_with_command.py index 9959d31..8cfd3f7 100755 --- a/deployment/kubernetes/run_with_command.py +++ b/deployment/kubernetes/run_with_command.py @@ -11,7 +11,6 @@ import functools sys.path.append(sys.argv[1]) - def get_node_num(): node_num = int(os.popen( "kubectl get node | awk '{print 
$1}' | sed -n '2, $p' | wc -l").read()) @@ -142,200 +141,60 @@ def input_node_name(service_name, pods_dict, image_name="sw"): pods_dict[service_name]["node"] = node_name return pods_dict -def input_request_cpu(service_name, node_dict, pods_dict): - cpu_quota = input("Please input run " + - service_name + " request cpu core number: ") - while True: - if re.match(r"\d{1,2}(\.\d+)?$", cpu_quota) and node_dict[pods_dict[service_name]["node"]]["cpu"] > float(cpu_quota) > 0: - node_dict[pods_dict[service_name] - ["node"]]["cpu"] -= float(cpu_quota) - pods_dict[service_name]["cpu"] = float(cpu_quota) - break - else: - cpu_quota = input("Input error, please input run " + - service_name + " request cpu core number again: ") - return node_dict, pods_dict - -def input_request_mem(service_name, node_dict, pods_dict): - mem_quota = input("Please input run " + service_name + - " request memory quota(MiB): ") - while True: - if re.match(r"\d{3,5}$", mem_quota) and node_dict[pods_dict[service_name]["node"]]["memory"] > int(mem_quota) > 0: - node_dict[pods_dict[service_name]["node"] - ]["memory"] -= int(mem_quota) - pods_dict[service_name]["memory"] = int(mem_quota) - break - else: - mem_quota = input("Input error, please input run " + - service_name + " request memory quota(MiB) again: ") +def input_request_cpu(service_name, node_dict, pods_dict, cpu_quota): + if re.match(r"\d{1,2}(\.\d+)?$", cpu_quota) and node_dict[pods_dict[service_name]["node"]]["cpu"] > float(cpu_quota) > 0: + node_dict[pods_dict[service_name] + ["node"]]["cpu"] -= float(cpu_quota) + pods_dict[service_name]["cpu"] = float(cpu_quota) + else: + print("Error: Overload! Pleaes redistribute cpu request in cpu_mem_managerment.cfg") + os._exit() return node_dict, pods_dict -def deploy_transcode_cluster(service_name): - ret = input("Do you need to deploy the " + service_name + - " transcode service? 
([y] or [n]): ") - while True: - if ret.lower() == "y": - configure_transcode_service(service_name) - break - elif ret.lower() == "n": - break - else: - ret = input("Input error, do you need to deploy the " + - service_name + " transcode service? ([y] or [n]): ") - -def configure_live_transcode_args(service_name, deploy_type, image_name): - if deploy_type == "auto": - pods_dict[service_name]["input"] = video_list[0] - pods_dict[service_name]["transcode0"] = { - 'codec': 'AVC', 'protocol': 'HLS', 'resolution': '856:480', 'bitrate': '5', 'output': 'output_name'} - return - - if len(video_list) == 1: - input_video = video_list[0] +def input_request_mem(service_name, node_dict, pods_dict, mem_quota): + if re.match(r"\d{3,5}$", mem_quota) and node_dict[pods_dict[service_name]["node"]]["memory"] > int(mem_quota) > 0: + node_dict[pods_dict[service_name]["node"] + ]["memory"] -= int(mem_quota) + pods_dict[service_name]["memory"] = int(mem_quota) else: - input_video = input( - "Please choose the one video clip to transcode (" + str(video_list)[1:-1] + "): ") - while True: - if input_video in video_list: - break - else: - input_video = input( - "Input error, please choose the one video clip to transcode again (" + str(video_list)[1:-1] + "): ") - pods_dict[service_name]["input"] = input_video - - output_channel = input("Please choose the output channel (1, 2 ,3, 4): ") - while True: - if output_channel in ["1", "2", "3", "4"]: - break - else: - output_channel = input( - "Input error, please choose the output channel again (1, 2 ,3, 4): ") - - output_dict = {} - protocol_dict = {"a": "HLS", "b": "DASH"} - protocol_str = ', '.join([("\033[0;31;40m" + key + "\033[0m: " + value) - for key, value in protocol_dict.items()]) - resolution_dict = {"a": ["hd480", "856:480"], "b": ["hd720", "1280:720"], "c": [ - "hd1080", "1920:1080"], "d": ["2kqhd", "2560:1440"]} - resolution_str = ', '.join([("\033[0;31;40m" + key + "\033[0m: " + value[0]) - for key, value in 
resolution_dict.items()]) - codec_dict = {"sw": {"AVC": "libx264", "HEVC": "libsvt_hevc", - "AV1": "libsvt_av1"}, "hw": {"AVC": "h264_vaapi", "HEVC": "hevc_vaapi"}} - - for i in range(int(output_channel)): - pods_dict[service_name]["transcode" + str(i)] = {} - codec = input("Please choose the %dth output encoder (%s): " % ( - i + 1, str(list(codec_dict[image_name].keys()))[1:-1])) - while True: - if codec_dict[image_name].get(codec.upper()): - break - else: - codec = input("Input error, please choose the %dth output encoder again (%s): " % ( - i + 1, str(list(codec_dict[image_name].keys()))[1:-1])) - - pods_dict[service_name]["transcode" + str(i)]["codec"] = codec.upper() - - resolution_key = input( - "Please choose the %dth output resolution (%s): " % (i + 1, resolution_str)) - while True: - if resolution_key.lower() in resolution_dict.keys(): - break - else: - resolution_key = input( - "Input error, please choose the %dth output resolution again(%s): " % (i + 1, resolution_str)) - - pods_dict[service_name]["transcode" + - str(i)]["resolution"] = resolution_dict[resolution_key.lower()][1] - - bitrate = input( - "Please enter the %dth output bitrate([1-20]Mbps): " % (i + 1)) - while True: - if re.match(r"(([1-9])|(1\d))$", bitrate): - break - else: - bitrate = input( - "Input error, please enter the %dth output bitrate again([1-20]Mbps): " % (i + 1)) - - pods_dict[service_name]["transcode" + str(i)]["bitrate"] = bitrate - - protocol_key = input( - "Please choose the %dth output streaming media communication protocol(%s): " % (i + 1, protocol_str)) - while True: - if protocol_key.lower() in protocol_dict.keys(): - break - else: - protocol_key = input( - "Input error, please choose the %dth output streaming media communication protocol again (%s): " % (i + 1, protocol_str)) - - pods_dict[service_name]["transcode" + - str(i)]["protocol"] = protocol_dict[protocol_key.lower()] + print("Error: Overload! 
Pleaes redistribute memory request in cpu_mem_managerment.cfg") + os._exit() + return node_dict, pods_dict - output_name = input( - "Please enter the %dth output video clip name: " % (i + 1)) - while True: - if re.match(r'^[^\\\s/:\*\?"<>\|]+$', output_name): - if output_name in output_dict.keys(): - output_name = input( - "The output video clip name already exists, please enter the %dth output video clip name again: " % (i + 1)) - else: - break - else: - output_name = input( - "Input error, please enter the %dth output video clip name again: " % (i + 1)) - pods_dict[service_name]["transcode" + str(i)]["output"] = output_name +def configure_live_transcode_args(service_name, num, trans_cfg_dict, image_name): + pods_dict[service_name]["input"] = trans_cfg_dict[service_name]['url'] + for trans_num in range(int(trans_cfg_dict[service_name]['density'])): + pods_dict[service_name]["transcode" + str(trans_num)] = { + 'codec': trans_cfg_dict[service_name]['encoder_type'], 'protocol': trans_cfg_dict[service_name]['protocol'], 'resolution': trans_cfg_dict[service_name]['width_height'], 'bitrate': trans_cfg_dict[service_name]['bitrate'], 'framerate':trans_cfg_dict[service_name]['framerate'], 'gop': trans_cfg_dict[service_name]['gop'], 'maxbFrames': trans_cfg_dict[service_name]['maxbframes'], 'refsNum': trans_cfg_dict[service_name]['refsnum'], 'preset': trans_cfg_dict[service_name]['preset'], 'output': 'output_name'} return -def configure_transcode_service(service_name): +def configure_transcode_service(service_name, num, trans_cfg_dict): global hw_node_num - i = 0 - while True: - service_name_index = re.search( - "((vod)|(live))(\d*)", service_name).group(1) + str(i) - pods.append(service_name_index) - pods_dict[service_name_index] = {} - if hw_node_num > 0: - image_name = input("Please choose the transcode mode of the " + str(i) + "th" + service_name + - " ([hw]: hardware is for E3/VCA2 or [sw]: software is for E5): ") - while True: - if image_name.lower() == "sw" or 
image_name.lower() == "hw": - hw_node_num -= 1 if image_name.lower() == "hw" else 0 - break - else: - image_name = input("Input error, please choose the transcode mode of the " + str(i) + "th" + service_name + - " again ([hw]: hardware is for E3/VCA2 or [sw]: software is for E5): ") - else: - image_name = "sw" - pods_dict[service_name_index]["mode"] = image_name - - if re.search("live\d+", service_name_index): - deploy_type = input( - "Do you need to deploy live-transcode-service by customizing parameters([y] or [n]): ") - while True: - if deploy_type.lower() == "y": - deploy_type = "manual" - break - elif deploy_type.lower() == "n": - deploy_type = "auto" - break - else: - deploy_type = input( - "Input error, do you need to deploy live-transcode-service by customizing parameters([y] or [n]): ") - - configure_live_transcode_args( - service_name_index, deploy_type, image_name.lower()) - - i += 1 - create_node = input("Do you still need to deploy the " + - str(i + 1) + "th " + service_name + "? ([y] or [n]): ") - while True: - if create_node.lower() == "y" or create_node.lower() == "n": - break - else: - create_node = input("Input error, do you still need to deploy the " + - str(i + 1) + "th " + service_name + "? 
([y] or [n]): ") - if create_node.lower() == "n": - break + for i in range(int(num)): + service_name_index = re.search( + "((vod)|(live))(\d*)", service_name).group(1) + str(i) + pods.append(service_name_index) + pods_dict[service_name_index] = {} + if hw_node_num > 0: + if trans_cfg_dict[service_name_index]['hwaccel'] == 'true': + image_name = "hw" + elif trans_cfg_dict[service_name_index]['hwaccel'] == 'false': + image_name = "sw" + while True: + if image_name.lower() == "sw" or image_name.lower() == "hw": + hw_node_num -= 1 if image_name.lower() == "hw" else 0 + break + else: + image_name = input("Input error, please choose the transcode mode of the " + str(i) + "th" + service_name + + " again ([hw]: hardware is for E3/VCA2 or [sw]: software is for E5): ") + else: + image_name = "sw" + pods_dict[service_name_index]["mode"] = image_name + + if re.search("live\d+", service_name_index): + configure_live_transcode_args( + service_name_index, num, trans_cfg_dict, image_name.lower()) def get_node_information(): node_dict = {} @@ -355,6 +214,14 @@ def get_node_information(): ).group(1)] = {"cpu": cpu, "memory": memory} return node_dict +def get_config(config_file): + import configparser + config = configparser.ConfigParser() + config.read(config_file) + config_dict = dict(config._sections) + for k in config_dict: + config_dict[k] = dict(config_dict[k]) + return config_dict node_num = get_node_num() @@ -369,13 +236,20 @@ def get_node_information(): node_dict = get_node_information() pods = ["cdn", "redis", "zookeeper", "kafka"] -deploy_transcode_cluster("vod") -deploy_transcode_cluster("live") +DIRS = sys.argv[1] +NVODS = sys.argv[2] +NLIVES = sys.argv[3] +transcode_cfg = DIRS + '/transcode.cfg' +cpu_mem_cfg = DIRS + '/cpu_mem_managerment.cfg' +trans_cfg_dict = get_config(transcode_cfg) +cpu_mem_cfg_dict = get_config(cpu_mem_cfg) + +configure_transcode_service("vod", NVODS, trans_cfg_dict) +configure_transcode_service("live", NLIVES, trans_cfg_dict) for pod in pods: 
pods_dict = input_node_name(pod, pods_dict) - node_dict, pods_dict = input_request_cpu(pod, node_dict, pods_dict) - node_dict, pods_dict = input_request_mem(pod, node_dict, pods_dict) + node_dict, pods_dict = input_request_cpu(pod, node_dict, pods_dict, cpu_mem_cfg_dict[pod]['cpu']) + node_dict, pods_dict = input_request_mem(pod, node_dict, pods_dict, cpu_mem_cfg_dict[pod]['mem']) -update_yaml.update_yaml(nfs_server, volume_directory, sys.argv[1], - pods, pods_dict, get_node_information()) +update_yaml.update_yaml(nfs_server, volume_directory, sys.argv[1], pods, pods_dict, get_node_information(),trans_cfg_dict) diff --git a/deployment/kubernetes/start.sh b/deployment/kubernetes/start.sh index 5262a95..4693f18 100755 --- a/deployment/kubernetes/start.sh +++ b/deployment/kubernetes/start.sh @@ -3,8 +3,6 @@ DIR=$(dirname $(readlink -f "$0")) export NGINX_LOG_VOLUME=$(readlink -f "/var/log/nginx") -EXT=*.yaml - # Set Bash color ECHO_PREFIX_INFO="\033[1;32;40mINFO...\033[0;0m" ECHO_PREFIX_ERROR="\033[1;31;40mError...\033[0;0m" @@ -62,26 +60,15 @@ for i in $(find "$DIR" -maxdepth 1 -name "*certificates.yaml"); do done done -rm -rf $DIR/$EXT +rm -rf $DIR/../../volume/video/hls/* +rm -rf $DIR/../../volume/video/dash/* sudo mkdir -p "${NGINX_LOG_VOLUME}" -yml="$DIR/docker-compose-template.yml" -test -f "$yml" - -dcv="$(kompose version | cut -f1 -d' ')" -mdcv="$(printf '%s\n' $dcv 1.16 | sort -r -V | head -n 1)" -if test "$mdcv" = "1.16"; then - echo "" - echo "kompose >=1.16 is required." - echo "Please upgrade kompose at https://docs.docker.com/compose/install." 
- echo "" - exit 0 -fi - -kompose convert -f "$yml" -o "$DIR" - -"$DIR/run_with_command.py" "$DIR" +NVODS="${2:-1}" +NLIVES="${3:-1}" +echo "Generating yamls with NVODS=${NVODS}, NLIVES=${NLIVES}" +"$DIR/run_with_command.py" "$DIR" ${NVODS} ${NLIVES} "$DIR/../certificate/self-sign.sh" create_secret 2>/dev/null || (kubectl delete secret self-signed-certificate; create_secret) diff --git a/deployment/kubernetes/stop.sh b/deployment/kubernetes/stop.sh index 6cefa28..5c507bf 100755 --- a/deployment/kubernetes/stop.sh +++ b/deployment/kubernetes/stop.sh @@ -48,4 +48,5 @@ done kubectl delete secret self-signed-certificate 2> /dev/null || echo -n "" -rm -rf $DIR/$EXT +rm -rf $DIR/../../volume/video/hls/* +rm -rf $DIR/../../volume/video/dash/* diff --git a/deployment/kubernetes/update_yaml.py b/deployment/kubernetes/update_yaml.py index aa3c8f0..c203fec 100755 --- a/deployment/kubernetes/update_yaml.py +++ b/deployment/kubernetes/update_yaml.py @@ -19,7 +19,7 @@ def get_host_ip(): return host_ip -def update_yaml(nfs_server, volume_directory, dir_path, pods, pods_dict, node_dict): +def update_yaml(nfs_server, volume_directory, dir_path, pods, pods_dict, node_dict,trans_cfg_dict): host_ip = get_host_ip() sys.path.append(dir_path) import yaml_utils @@ -64,12 +64,33 @@ def update_yaml(nfs_server, volume_directory, dir_path, pods, pods_dict, node_di if re.search("live\d", pod): live_args = { 'input_video': pods_dict[pod]["input"], "output_dict": {}} - codec_dict = {"AVC": "libx264", - "HEVC": "libsvt_hevc", "AV1": "libsvt_av1"} + if trans_cfg_dict[pod]['hwaccel'] == 'false': + if trans_cfg_dict[pod]['protocol'] == 'DASH': + if trans_cfg_dict[pod]['encoder_type'] == 'AVC' or trans_cfg_dict[pod]['encoder_type'] == 'HEVC' or trans_cfg_dict[pod]['encoder_type'] == 'AV1': + codec_dict = {"AVC": "libx264", "HEVC": "libsvt_hevc", "AV1": "libsvt_av1"} + else: + print("Error: Only support AVC/HEVC/AV1! 
Please input correct encoder_type in transcode.cfg (" + pod + ")") + os._exit() + elif trans_cfg_dict[pod]['protocol'] == 'HLS': + if trans_cfg_dict[pod]['encoder_type'] == 'AVC' or trans_cfg_dict[pod]['encoder_type'] == 'HEVC': + codec_dict = {"AVC": "libx264", "HEVC": "libsvt_hevc"} + else: + print("Error: Only support AVC/HEVC! Please input correct encoder_type in transcode.cfg (" + pod + ")") + os._exit() + else: + print("Error: Please input correct protocol(HLS/DASH) in transcode.cfg (" + pod + ")") + os._exit() + elif trans_cfg_dict[pod]['hwaccel'] == 'true': + if trans_cfg_dict[pod]['encoder_type'] == 'AVC' or trans_cfg_dict[pod]['encoder_type'] == 'HEVC': + codec_dict = {"AVC": "h264_vaapi", "HEVC": "hevc_vaapi"} + else: + print("Error: Only support AVC/HEVC! Please input correct encoder_type in transcode.cfg (" + pod + ")") + os._exit() for num in range(4): if pods_dict[pod].get("transcode" + str(num), None) and pods_dict[pod]["transcode" + str(num)].get("protocol", None) and pods_dict[pod]["transcode" + str(num)].get("resolution", None) and pods_dict[pod]["transcode" + str(num)].get("bitrate", None) and pods_dict[pod]["transcode" + str(num)].get("codec", None) and pods_dict[pod]["transcode" + str(num)].get("output", None): - live_args["output_dict"][pods_dict[pod]["transcode" + str(num)]["output"] + "_" + re.search("live(\d+)", pod).group(1) + "_" + str(num)] = [pods_dict[pod]["transcode" + str( - num)]["protocol"].lower(), pods_dict[pod]["transcode" + str(num)]["resolution"], pods_dict[pod]["transcode" + str(num)]["bitrate"], codec_dict[pods_dict[pod]["transcode" + str(num)]["codec"]]] + + live_args["output_dict"][pods_dict[pod]["transcode" + str(num)]["output"] + "_" + re.search("live(\d+)", pod).group(1) + "_" + str(num)] = [pods_dict[pod]["transcode" + str(num)]["protocol"].lower(), pods_dict[pod]["transcode" + str(num)]["resolution"], pods_dict[pod]["transcode" + str(num)]["bitrate"], codec_dict[pods_dict[pod]["transcode" + str(num)]["codec"]], 
pods_dict[pod]["transcode" + str(num)]["framerate"], pods_dict[pod]["transcode" + str(num)]["gop"], pods_dict[pod]["transcode" + str(num)]["maxbFrames"], pods_dict[pod]["transcode" + str(num)]["refsNum"], pods_dict[pod]["transcode" + str(num)]["preset"]] + print("\033[0;31;40mhttps://%s/%s/%s/index.%s\033[0m" % (host_ip, pods_dict[pod]["transcode" + str(num)]["protocol"].lower(), pods_dict[pod]["transcode" + str(num)] ["output"] + "_" + re.search("live(\d+)", pod).group(1) + "_" + str(num), "m3u8" if pods_dict[pod]["transcode" + str(num)]["protocol"].lower() == "hls" else "mpd")) data = yaml_utils.update_command( diff --git a/deployment/kubernetes/vod-service-deployment.yaml.m4 b/deployment/kubernetes/vod-service-deployment.yaml.m4 new file mode 100644 index 0000000..fb80464 --- /dev/null +++ b/deployment/kubernetes/vod-service-deployment.yaml.m4 @@ -0,0 +1,32 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: vod-service + name: vod-service +spec: + replicas: 1 + template: + metadata: + creationTimestamp: null + labels: + app: vod-service + spec: + containers: + - args: + - bash + - -c + - /home/main.py + image: ovc_software_transcode_service:latest + imagePullPolicy: IfNotPresent + name: vod-service + resources: + limits: + cpu: "6" + memory: 6291456e3 + requests: + cpu: "3" + memory: 3145728e3 + nodeSelector: + kubernetes.io/hostname: master.machine + restartPolicy: Always diff --git a/deployment/kubernetes/yaml_utils.py b/deployment/kubernetes/yaml_utils.py index ef83592..8a579a1 100755 --- a/deployment/kubernetes/yaml_utils.py +++ b/deployment/kubernetes/yaml_utils.py @@ -18,8 +18,8 @@ def dump_yaml_file(data, fileName): def update_service_name(data, service_name): data["metadata"]["name"] = service_name - data["spec"]["template"]["metadata"]["labels"]["io.kompose.service"] = service_name - data["metadata"]["labels"]["io.kompose.service"] = service_name + data["spec"]["template"]["metadata"]["labels"]["app"] = service_name + 
data["metadata"]["labels"]["app"] = service_name data["spec"]["template"]["spec"]["containers"][0]["name"] = service_name return data @@ -33,7 +33,7 @@ def update_command(data, imageName, live_args): for key, value in live_args['output_dict'].items(): data['spec']['template']['spec']['containers'][0]['lifecycle']['preStop']['exec']['command'].append( " /var/www/" + value[0] + '/' + key ) thread = " -thread_count 96" if value[3].find('libsvt') != -1 else "" - command += ' -vf ' + scale_dict[imageName] + '=' + value[1] + ' -c:v ' + value[3] + ' -b:v ' + value[2] + 'M -g 32 -forced-idr 1' + thread + ' -an -f flv rtmp://cdn-service/' + value[0] + '/' + key + command += ' -vf ' + scale_dict[imageName] + '=' + value[1] + ' -c:v ' + value[3] + ' -b:v ' + value[2] + ' -r ' + value[4] + ' -g ' + value[5] + ' -bf ' + value[6] + ' -refs ' + value[7] + ' -preset ' + value[8] + ' -forced-idr 1' + thread + ' -an -f flv rtmp://cdn-service/' + value[0] + '/' + key command_caps = ['bash', '-c', command + ' -abr_pipeline'] data['spec']['template']['spec']['containers'][0].update( @@ -43,7 +43,6 @@ def update_command(data, imageName, live_args): def update_imageName(data, imageName, isVOD): if imageName == "hw": data['spec']['template']['spec']['containers'][0]['resources']['limits']['gpu.intel.com/i915'] = 1 - return data def update_nodeSelector(data, nodeName): diff --git a/deployment/kubernetes/zookeeper-service-deployment.yaml.m4 b/deployment/kubernetes/zookeeper-service-deployment.yaml.m4 new file mode 100644 index 0000000..771591d --- /dev/null +++ b/deployment/kubernetes/zookeeper-service-deployment.yaml.m4 @@ -0,0 +1,44 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: zookeeper-service + name: zookeeper-service +spec: + replicas: 1 + template: + metadata: + creationTimestamp: null + labels: + app: zookeeper-service + spec: + containers: + - env: + - name: ZOOKEEPER_CLIENT_PORT + value: "2181" + - name: ZOOKEEPER_HEAP_OPTS + value: -Xmx2048m 
-Xms2048m + - name: ZOOKEEPER_LOG4J_LOGGERS + value: zookeepr=ERROR + - name: ZOOKEEPER_LOG4J_ROOT_LOGLEVEL + value: ERROR + - name: ZOOKEEPER_MAX_CLIENT_CNXNS + value: "20000" + - name: ZOOKEEPER_SERVER_ID + value: "1" + - name: ZOOKEEPER_TICK_TIME + value: "2000" + image: zookeeper:latest + name: zookeeper-service + ports: + - containerPort: 2181 + resources: + limits: + cpu: "2" + memory: 1048576e3 + requests: + cpu: "1" + memory: 524288e3 + nodeSelector: + kubernetes.io/hostname: master.machine + restartPolicy: Always diff --git a/deployment/kubernetes/zookeeper-service-service.yaml.m4 b/deployment/kubernetes/zookeeper-service-service.yaml.m4 new file mode 100644 index 0000000..3bfaaba --- /dev/null +++ b/deployment/kubernetes/zookeeper-service-service.yaml.m4 @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: zookeeper-service + name: zookeeper-service +spec: + ports: + - name: "2181" + port: 2181 + targetPort: 2181 + selector: + app: zookeeper-service + type: NodePort diff --git a/script/deployment.cmake b/script/deployment.cmake index b1de87c..0283519 100644 --- a/script/deployment.cmake +++ b/script/deployment.cmake @@ -1,3 +1,2 @@ -add_custom_target(deploy_${service} "${CMAKE_CURRENT_SOURCE_DIR}/start.sh" "${service}") -add_custom_target(start_${service} "${CMAKE_CURRENT_SOURCE_DIR}/start.sh" "${service}") +add_custom_target(start_${service} "${CMAKE_CURRENT_SOURCE_DIR}/start.sh" "${service}" "${NVODS}" "${NLIVES}") add_custom_target(stop_${service} "${CMAKE_CURRENT_SOURCE_DIR}/stop.sh" "${service}") diff --git a/script/service.cmake b/script/service.cmake index 6169f76..bdc02c2 100644 --- a/script/service.cmake +++ b/script/service.cmake @@ -1,3 +1,3 @@ if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/build.sh") - add_custom_target(build_${service} ALL "${CMAKE_CURRENT_SOURCE_DIR}/build.sh") + add_custom_target(build_${service} ALL "${CMAKE_CURRENT_SOURCE_DIR}/build.sh" "${NVODS}" "${NLIVES}") endif() From 
c45c8b666b9bf704299d65bfe91613b16fbef539 Mon Sep 17 00:00:00 2001 From: "Gu,YingX" Date: Tue, 28 Apr 2020 14:19:24 +0800 Subject: [PATCH 02/91] Update to Kubernetes 1.18.2 Signed-off-by: Gu,YingX --- deployment/kubernetes/build.sh | 10 +++++----- deployment/kubernetes/cdn-service-deployment.yaml.m4 | 5 ++++- .../kubernetes/kafka-service-deployment.yaml.m4 | 5 ++++- deployment/kubernetes/live-service-deployment.yaml.m4 | 5 ++++- deployment/kubernetes/logging/start_logging.sh | 2 +- .../kubernetes/monitoring/grafana/grafana-deploy.yaml | 2 +- .../monitoring/heapster/heapster-deploy.yaml | 7 +++++-- .../monitoring/node-export/node-exporter-deploy.yaml | 2 +- .../monitoring/prometheus/prometheus-deploy.yaml | 2 +- deployment/kubernetes/monitoring/start_monitoring.sh | 2 +- .../kubernetes/redis-service-deployment.yaml.m4 | 5 ++++- deployment/kubernetes/run_with_command.py | 11 +++++++---- deployment/kubernetes/vod-service-deployment.yaml.m4 | 5 ++++- deployment/kubernetes/yaml_utils.py | 1 + .../kubernetes/zookeeper-service-deployment.yaml.m4 | 5 ++++- script/Kubernetes_remove.sh | 2 +- script/Kubernetes_setup_master.sh | 8 ++++---- script/Kubernetes_setup_node.sh | 4 ++-- 18 files changed, 54 insertions(+), 29 deletions(-) diff --git a/deployment/kubernetes/build.sh b/deployment/kubernetes/build.sh index 3557f8a..7196064 100755 --- a/deployment/kubernetes/build.sh +++ b/deployment/kubernetes/build.sh @@ -19,7 +19,7 @@ cat <> ${DIR}/cpu_mem_managerment.cfg cpu = 2 mem = 2000 [redis] -cpu = 0.5 +cpu = 1 mem = 500 [zookeeper] cpu = 1 @@ -32,10 +32,10 @@ EOF for ((VODIDX=0;VODIDX<${NVODS};VODIDX++)); do cat <> ${DIR}/cpu_mem_managerment.cfg [vod${VODIDX}] -cpu = 1.5 +cpu = 3 mem = 3000 EOF - cat <> ${DIR}/transcode.cfg + cat <> ${DIR}/vod-transcode.cfg [vod${VODIDX}] hwaccel = false EOF @@ -44,10 +44,10 @@ done for ((LIVEIDX=0;LIVEIDX<${NLIVES};LIVEIDX++)); do cat <> ${DIR}/cpu_mem_managerment.cfg [live${LIVEIDX}] -cpu = 1.5 +cpu = 4 mem = 3000 EOF - cat <> 
${DIR}/transcode.cfg + cat <> ${DIR}/live-transcode.cfg [live${LIVEIDX}] url = bbb_sunflower_1080p_30fps_normal.mp4 width_height = 856x480 diff --git a/deployment/kubernetes/cdn-service-deployment.yaml.m4 b/deployment/kubernetes/cdn-service-deployment.yaml.m4 index 8e54731..d5124ed 100644 --- a/deployment/kubernetes/cdn-service-deployment.yaml.m4 +++ b/deployment/kubernetes/cdn-service-deployment.yaml.m4 @@ -1,10 +1,13 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: labels: app: cdn-service name: cdn-service spec: + selector: + matchLabels: + app: cdn-service replicas: 1 template: metadata: diff --git a/deployment/kubernetes/kafka-service-deployment.yaml.m4 b/deployment/kubernetes/kafka-service-deployment.yaml.m4 index 9b3e8bf..a7b372f 100644 --- a/deployment/kubernetes/kafka-service-deployment.yaml.m4 +++ b/deployment/kubernetes/kafka-service-deployment.yaml.m4 @@ -1,10 +1,13 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: labels: app: kafka-service name: kafka-service spec: + selector: + matchLabels: + app: kafka-service replicas: 1 template: metadata: diff --git a/deployment/kubernetes/live-service-deployment.yaml.m4 b/deployment/kubernetes/live-service-deployment.yaml.m4 index cb3ed88..f5a80ea 100644 --- a/deployment/kubernetes/live-service-deployment.yaml.m4 +++ b/deployment/kubernetes/live-service-deployment.yaml.m4 @@ -1,10 +1,13 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: labels: app: live-service name: live-service spec: + selector: + matchLabels: + app: live-service replicas: 1 template: metadata: diff --git a/deployment/kubernetes/logging/start_logging.sh b/deployment/kubernetes/logging/start_logging.sh index bda6b6b..d2c5115 100755 --- a/deployment/kubernetes/logging/start_logging.sh +++ b/deployment/kubernetes/logging/start_logging.sh @@ -23,7 +23,7 @@ set +e try_command hash kubectl > /dev/null set -e -kubectl create secret generic 
kibana-ssl-certificates --namespace=kube-system --from-file=self.key="$DIR/../../../self-certificates/self.key" --from-file=self.crt="$DIR/../../../self-certificates/self.crt" --dry-run -o yaml > "$DIR/kibana-ssl-certificates.yaml" +kubectl create secret generic kibana-ssl-certificates --namespace=kube-system --from-file=self.key="$DIR/../../certificate/self.key" --from-file=self.crt="$DIR/../../certificate/self.crt" --dry-run -o yaml > "$DIR/kibana-ssl-certificates.yaml" for i in $(find "$DIR" -name "*.yaml"); do kubectl create -f "$i" diff --git a/deployment/kubernetes/monitoring/grafana/grafana-deploy.yaml b/deployment/kubernetes/monitoring/grafana/grafana-deploy.yaml index c0bd9f2..62969a8 100644 --- a/deployment/kubernetes/monitoring/grafana/grafana-deploy.yaml +++ b/deployment/kubernetes/monitoring/grafana/grafana-deploy.yaml @@ -1,4 +1,4 @@ -apiVersion: apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: k8s-pgmon-grafana diff --git a/deployment/kubernetes/monitoring/heapster/heapster-deploy.yaml b/deployment/kubernetes/monitoring/heapster/heapster-deploy.yaml index f9c8ab4..018a2d7 100644 --- a/deployment/kubernetes/monitoring/heapster/heapster-deploy.yaml +++ b/deployment/kubernetes/monitoring/heapster/heapster-deploy.yaml @@ -1,14 +1,17 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: name: heapster namespace: kube-system spec: replicas: 1 + selector: + matchLabels: + app: heapster template: metadata: labels: - k8s-app: heapster + app: heapster spec: serviceAccountName: admin containers: diff --git a/deployment/kubernetes/monitoring/node-export/node-exporter-deploy.yaml b/deployment/kubernetes/monitoring/node-export/node-exporter-deploy.yaml index 403a202..90c597a 100644 --- a/deployment/kubernetes/monitoring/node-export/node-exporter-deploy.yaml +++ b/deployment/kubernetes/monitoring/node-export/node-exporter-deploy.yaml @@ -1,5 +1,5 @@ --- -apiVersion: apps/v1beta2 +apiVersion: apps/v1 kind: DaemonSet 
metadata: labels: diff --git a/deployment/kubernetes/monitoring/prometheus/prometheus-deploy.yaml b/deployment/kubernetes/monitoring/prometheus/prometheus-deploy.yaml index ca18aaf..1c778d1 100644 --- a/deployment/kubernetes/monitoring/prometheus/prometheus-deploy.yaml +++ b/deployment/kubernetes/monitoring/prometheus/prometheus-deploy.yaml @@ -1,5 +1,5 @@ --- -apiVersion: apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: k8s-pgmon-prometheus diff --git a/deployment/kubernetes/monitoring/start_monitoring.sh b/deployment/kubernetes/monitoring/start_monitoring.sh index d72d448..00a46b3 100755 --- a/deployment/kubernetes/monitoring/start_monitoring.sh +++ b/deployment/kubernetes/monitoring/start_monitoring.sh @@ -25,7 +25,7 @@ set -e kubectl create -f "$DIR/namespace/namespace.yaml" -kubectl create secret generic grafana-ssl-certificates --namespace=pgmon --from-file=self.key="$DIR/../../../self-certificates/self.key" --from-file=self.crt="$DIR/../../../self-certificates/self.crt" --dry-run -o yaml > "$DIR/grafana-ssl-certificates.yaml" +kubectl create secret generic grafana-ssl-certificates --namespace=pgmon --from-file=self.key="$DIR/../../certificate/self.key" --from-file=self.crt="$DIR/../../certificate/self.crt" --dry-run -o yaml > "$DIR/grafana-ssl-certificates.yaml" for i in $(find "$DIR" -path "$DIR/namespace" -a -prune -o -name "*.yaml" -print); do kubectl create -f "$i" diff --git a/deployment/kubernetes/redis-service-deployment.yaml.m4 b/deployment/kubernetes/redis-service-deployment.yaml.m4 index 9559267..0425eb7 100644 --- a/deployment/kubernetes/redis-service-deployment.yaml.m4 +++ b/deployment/kubernetes/redis-service-deployment.yaml.m4 @@ -1,10 +1,13 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: labels: app: redis-service name: redis-service spec: + selector: + matchLabels: + app: redis-service replicas: 1 template: metadata: diff --git a/deployment/kubernetes/run_with_command.py 
b/deployment/kubernetes/run_with_command.py index 8cfd3f7..feebeab 100755 --- a/deployment/kubernetes/run_with_command.py +++ b/deployment/kubernetes/run_with_command.py @@ -239,13 +239,16 @@ def get_config(config_file): DIRS = sys.argv[1] NVODS = sys.argv[2] NLIVES = sys.argv[3] -transcode_cfg = DIRS + '/transcode.cfg' +live_transcode_cfg = DIRS + '/live-transcode.cfg' +vod_transcode_cfg = DIRS + '/vod-transcode.cfg' cpu_mem_cfg = DIRS + '/cpu_mem_managerment.cfg' -trans_cfg_dict = get_config(transcode_cfg) +live_trans_cfg_dict = get_config(live_transcode_cfg) +vod_trans_cfg_dict = get_config(vod_transcode_cfg) cpu_mem_cfg_dict = get_config(cpu_mem_cfg) +trans_cfg_dict = {**live_trans_cfg_dict, **vod_trans_cfg_dict} -configure_transcode_service("vod", NVODS, trans_cfg_dict) -configure_transcode_service("live", NLIVES, trans_cfg_dict) +configure_transcode_service("vod", NVODS, vod_trans_cfg_dict) +configure_transcode_service("live", NLIVES, live_trans_cfg_dict) for pod in pods: pods_dict = input_node_name(pod, pods_dict) diff --git a/deployment/kubernetes/vod-service-deployment.yaml.m4 b/deployment/kubernetes/vod-service-deployment.yaml.m4 index fb80464..826f674 100644 --- a/deployment/kubernetes/vod-service-deployment.yaml.m4 +++ b/deployment/kubernetes/vod-service-deployment.yaml.m4 @@ -1,10 +1,13 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: labels: app: vod-service name: vod-service spec: + selector: + matchLabels: + app: vod-service replicas: 1 template: metadata: diff --git a/deployment/kubernetes/yaml_utils.py b/deployment/kubernetes/yaml_utils.py index 8a579a1..eb28753 100755 --- a/deployment/kubernetes/yaml_utils.py +++ b/deployment/kubernetes/yaml_utils.py @@ -21,6 +21,7 @@ def update_service_name(data, service_name): data["spec"]["template"]["metadata"]["labels"]["app"] = service_name data["metadata"]["labels"]["app"] = service_name data["spec"]["template"]["spec"]["containers"][0]["name"] = service_name + 
data["spec"]["selector"]["matchLabels"]["app"] = service_name return data def update_command(data, imageName, live_args): diff --git a/deployment/kubernetes/zookeeper-service-deployment.yaml.m4 b/deployment/kubernetes/zookeeper-service-deployment.yaml.m4 index 771591d..118f052 100644 --- a/deployment/kubernetes/zookeeper-service-deployment.yaml.m4 +++ b/deployment/kubernetes/zookeeper-service-deployment.yaml.m4 @@ -1,10 +1,13 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: labels: app: zookeeper-service name: zookeeper-service spec: + selector: + matchLabels: + app: zookeeper-service replicas: 1 template: metadata: diff --git a/script/Kubernetes_remove.sh b/script/Kubernetes_remove.sh index 1e142ff..26a4719 100755 --- a/script/Kubernetes_remove.sh +++ b/script/Kubernetes_remove.sh @@ -42,7 +42,7 @@ try_command lsb_release -si > /dev/null LINUX_DISTRO=`lsb_release -si` if [ "$LINUX_DISTRO" == "Ubuntu" ]; then - try_command apt-get remove kubelet kubeadm kubectl + try_command apt-get purge kubelet kubeadm kubectl try_command apt -y autoremove elif [ "$LINUX_DISTRO" == "CentOS" ]; then try_command yum autoremove kubelet kubeadm kubectl diff --git a/script/Kubernetes_setup_master.sh b/script/Kubernetes_setup_master.sh index 46e015e..225e4fa 100755 --- a/script/Kubernetes_setup_master.sh +++ b/script/Kubernetes_setup_master.sh @@ -62,7 +62,7 @@ if [ "$LINUX_DISTRO" == "Ubuntu" ]; then deb https://apt.kubernetes.io/ kubernetes-xenial main EOF try_command apt-get update - try_command apt-get install -y kubelet=1.15.3-00 kubeadm=1.15.3-00 kubectl=1.15.3-00 openssh-client fabric + try_command apt-get install -y kubelet=1.18.2-00 kubeadm=1.18.2-00 kubectl=1.18.2-00 openssh-client fabric try_command apt-mark hold kubelet kubeadm kubectl elif [ "$LINUX_DISTRO" == "CentOS" ]; then cat < /etc/yum.repos.d/kubernetes.repo @@ -75,7 +75,7 @@ repo_gpgcheck=1 gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg 
https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg exclude=kube* EOF -yum install -y kubelet-1.15.3 kubeadm-1.15.3 kubectl-1.15.3 openssh-clients fabric --disableexcludes=kubernetes + try_command yum install -y kubelet-1.18.2-0 kubeadm-1.18.2-0 kubectl-1.18.2-0 openssh-clients fabric --disableexcludes=kubernetes else echo -e $ECHO_PREFIX_INFO "The installation will be cancelled." echo -e $ECHO_PREFIX_INFO "The CDN-Transcode-Sample does not support this OS, please use Ubuntu 18.04 or CentOS 7.6.\n" @@ -112,7 +112,7 @@ try_command systemctl restart kubelet # Kubeadm init unset http_proxy unset https_proxy -try_command kubeadm init --kubernetes-version=v1.15.3 --pod-network-cidr=10.244.0.0/16 +try_command kubeadm init --kubernetes-version=v1.18.2 --pod-network-cidr=10.244.0.0/16 try_command mkdir -p $HOME/.kube try_command cp -f /etc/kubernetes/admin.conf $HOME/.kube/config try_command chown $(id -u):$(id -g) $HOME/.kube/config @@ -122,7 +122,7 @@ try_command kubectl taint nodes --all node-role.kubernetes.io/master- # Set Proxy if need export http_proxy=$proxy_http export https_proxy=$proxy_https -try_command kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/a70459be0084506e4ec919aa1c114638878db11b/Documentation/kube-flannel.yml +try_command kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/2140ac876ef134e0ed5af15c65e414cf26827915/Documentation/kube-flannel.yml try_command sed -i '/- kube-apiserver/a\\ - --service-node-port-range=1-65535' /etc/kubernetes/manifests/kube-apiserver.yaml echo -e $ECHO_PREFIX_INFO "Installation completed." 
diff --git a/script/Kubernetes_setup_node.sh b/script/Kubernetes_setup_node.sh index ab5c520..5075bf0 100755 --- a/script/Kubernetes_setup_node.sh +++ b/script/Kubernetes_setup_node.sh @@ -57,7 +57,7 @@ if [ "$LINUX_DISTRO" == "Ubuntu" ]; then deb https://apt.kubernetes.io/ kubernetes-xenial main EOF try_command apt-get update - try_command apt-get install -y kubelet=1.15.3-00 kubeadm=1.15.3-00 kubectl=1.15.3-00 + try_command apt-get install -y kubelet=1.18.2-00 kubeadm=1.18.2-00 kubectl=1.18.2-00 try_command apt-mark hold kubelet kubeadm kubectl elif [ "$LINUX_DISTRO" == "CentOS" ]; then cat < /etc/yum.repos.d/kubernetes.repo @@ -70,7 +70,7 @@ repo_gpgcheck=1 gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg exclude=kube* EOF -yum install -y kubelet-1.15.3 kubeadm-1.15.3 kubectl-1.15.3 --disableexcludes=kubernetes + try_command yum install -y kubelet-1.18.2-0 kubeadm-1.18.2-0 kubectl-1.18.2-0 --disableexcludes=kubernetes else echo -e $ECHO_PREFIX_INFO "The installation will be cancelled." 
echo -e $ECHO_PREFIX_INFO "The CDN-Transcode-Sample does not support this OS, please use Ubuntu 18.04 or CentOS 7.6.\n" From 9b0b2c62820a17253830f0592f974232cadf82e5 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Wed, 13 May 2020 10:22:44 -0700 Subject: [PATCH 03/91] remove docker sudo & move kubectl out of python code --- deployment/docker-swarm/start.sh | 10 ++--- deployment/docker-swarm/stop.sh | 10 ++--- deployment/kubernetes/run_with_command.py | 45 +++++++---------------- deployment/kubernetes/start.sh | 4 +- script/build.sh | 4 +- script/shell.sh | 6 +-- 6 files changed, 32 insertions(+), 47 deletions(-) diff --git a/deployment/docker-swarm/start.sh b/deployment/docker-swarm/start.sh index ed5df8c..a0a7335 100755 --- a/deployment/docker-swarm/start.sh +++ b/deployment/docker-swarm/start.sh @@ -8,9 +8,9 @@ export NGINX_LOG_VOLUME=$(readlink -f "/var/log/nginx") export HTML_VOLUME=$(readlink -f "$DIR/../../volume/html") export SECRETS_VOLUME=$(readlink -f "$DIR/../certificate") -sudo docker container prune -f -sudo docker volume prune -f -sudo docker network prune -f +docker container prune -f +docker volume prune -f +docker network prune -f sudo rm -rf "${VIDEO_DASH_VOLUME}" "${VIDEO_HLS_VOLUME}" sudo mkdir -p "${VIDEO_DASH_VOLUME}" "${VIDEO_HLS_VOLUME}" "${NGINX_LOG_VOLUME}" @@ -30,12 +30,12 @@ docker_compose) fi export USER_ID=$(id -u) export GROUP_ID=$(id -g) - sudo -E docker-compose -f "$yml" -p ovc --compatibility up + docker-compose -f "$yml" -p ovc --compatibility up ;; *) export USER_ID=$(id -u) export GROUP_ID=$(id -g) "$DIR/../certificate/self-sign.sh" - sudo -E docker stack deploy -c "$yml" ovc + docker stack deploy -c "$yml" ovc ;; esac diff --git a/deployment/docker-swarm/stop.sh b/deployment/docker-swarm/stop.sh index d3f4b54..829d621 100755 --- a/deployment/docker-swarm/stop.sh +++ b/deployment/docker-swarm/stop.sh @@ -15,13 +15,13 @@ docker_compose) echo "" exit 0 fi - sudo docker-compose -f "$yml" -p 
ovc --compatibility down + docker-compose -f "$yml" -p ovc --compatibility down ;; *) - sudo docker stack rm ovc + docker stack rm ovc ;; esac -sudo docker container prune -f -sudo docker volume prune -f -sudo docker network prune -f +docker container prune -f +docker volume prune -f +docker network prune -f diff --git a/deployment/kubernetes/run_with_command.py b/deployment/kubernetes/run_with_command.py index feebeab..fb80064 100755 --- a/deployment/kubernetes/run_with_command.py +++ b/deployment/kubernetes/run_with_command.py @@ -11,16 +11,6 @@ import functools sys.path.append(sys.argv[1]) -def get_node_num(): - node_num = int(os.popen( - "kubectl get node | awk '{print $1}' | sed -n '2, $p' | wc -l").read()) - if node_num == 0: - print("Error, no nodes were found, please check environment!!!") - os._exit(1) - print("There are " + str(node_num) + - " kubernetes nodes on your host server!!!") - return node_num - def ping(host): cmd = 'ping -c %d %s' % (1, host) p = subprocess.Popen(args=cmd, shell=True, @@ -196,22 +186,14 @@ def configure_transcode_service(service_name, num, trans_cfg_dict): configure_live_transcode_args( service_name_index, num, trans_cfg_dict, image_name.lower()) -def get_node_information(): - node_dict = {} - basic_info = os.popen("kubectl describe node").read() - index_list = [i.start() for i in re.finditer("Name:", basic_info)] - for i in range(len(index_list)): - cpu_info = re.findall( - "(\d+)", os.popen("kubectl describe node | awk -F ' ' '$1==\"cpu\"' |awk 'NR==" + str(i+1) + "'").read()) - memory_info = re.findall( - "(\d+)", os.popen("kubectl describe node | awk -F ' ' '$1==\"memory\" {print $0}'").read()) - cpu = int(int(re.search( - "cpu:\s+(\d+)", basic_info[index_list[i]: -1]).group(1)) - int(cpu_info[0])/1000) - memory = int((int(re.search( - "memory:\s+(\d+)", basic_info[index_list[i]: -1]).group(1)) / 1024 - int(memory_info[0]))) - if cpu > 0 and memory > 0: - node_dict[re.search("Name:\s+(.+)", basic_info[index_list[i]: -1] - 
).group(1)] = {"cpu": cpu, "memory": memory} +def get_node_information(description): + node_dict={} + for line in description.split("\n"): + fields=line.split() + if fields[2].endswith("Ki"): memory=int(fields[2][:-2])/1024 + if fields[2].endswith("Mi"): memory=int(fields[2][:-2]) + if fields[2].endswith("Gi"): memory=int(fields[2][:-2])*1024 + node_dict[fields[0]]={ "cpu": int(fields[1]), "memory": int(memory) } return node_dict def get_config(config_file): @@ -223,22 +205,23 @@ def get_config(config_file): config_dict[k] = dict(config_dict[k]) return config_dict -node_num = get_node_num() - -sw_node_name_list = os.popen( - "kubectl get node | awk '{print $1}' | sed -n '2, $p'").read().split("\n") +sw_node_name_list = sys.argv[4].split(" ") +node_num=len(sw_node_name_list) sw_node_name_list = list(filter(None, sw_node_name_list)) hw_node_name_list = copy.deepcopy(sw_node_name_list) hw_node_num = len(hw_node_name_list) nfs_server, volume_directory, video_list = configure_basic_module(node_num) pods_dict = {"cdn": {}, "redis": {}, "zookeeper": {}, "kafka": {}} -node_dict = get_node_information() +node_dict = get_node_information(sys.argv[5]) pods = ["cdn", "redis", "zookeeper", "kafka"] DIRS = sys.argv[1] NVODS = sys.argv[2] NLIVES = sys.argv[3] +NNODES = sys.argv[4] +NODE_DESCRIPTION = sys.argv[5] + live_transcode_cfg = DIRS + '/live-transcode.cfg' vod_transcode_cfg = DIRS + '/vod-transcode.cfg' cpu_mem_cfg = DIRS + '/cpu_mem_managerment.cfg' diff --git a/deployment/kubernetes/start.sh b/deployment/kubernetes/start.sh index 4693f18..327d1fd 100755 --- a/deployment/kubernetes/start.sh +++ b/deployment/kubernetes/start.sh @@ -68,7 +68,9 @@ sudo mkdir -p "${NGINX_LOG_VOLUME}" NVODS="${2:-1}" NLIVES="${3:-1}" echo "Generating yamls with NVODS=${NVODS}, NLIVES=${NLIVES}" -"$DIR/run_with_command.py" "$DIR" ${NVODS} ${NLIVES} +NODES=$(kubectl get node | awk '{print $1}' | sed -n '2, $p') +DESCRIPTIONS="$(kubectl get node --no-headers -o 
custom-columns=NAME:metadata.name,CPU:status.capacity.cpu,MEM:status.capacity.memory)" +"$DIR/run_with_command.py" "$DIR" ${NVODS} ${NLIVES} "$NODES" "$DESCRIPTIONS" "$DIR/../certificate/self-sign.sh" create_secret 2>/dev/null || (kubectl delete secret self-signed-certificate; create_secret) diff --git a/script/build.sh b/script/build.sh index 4c0d2b1..8904582 100644 --- a/script/build.sh +++ b/script/build.sh @@ -12,9 +12,9 @@ for dep in .8 .7 .6 .5 .4 .3 .2 .1 ''; do if test -z "$dep"; then image="$IMAGE"; fi if grep -q 'AS build' "${DIR}/Dockerfile$dep"; then - sudo docker build --network=host --file="${DIR}/Dockerfile$dep" --target build -t "$image:build" "$DIR" $(env | grep -E '_(proxy|REPO|VER)=' | sed 's/^/--build-arg /') + docker build --network=host --file="${DIR}/Dockerfile$dep" --target build -t "$image:build" "$DIR" $(env | grep -E '_(proxy|REPO|VER)=' | sed 's/^/--build-arg /') fi - sudo docker build --network=host --file="${DIR}/Dockerfile$dep" -t "$image:latest" "$DIR" $(env | grep -E '_(proxy|REPO|VER)=' | sed 's/^/--build-arg /') + docker build --network=host --file="${DIR}/Dockerfile$dep" -t "$image:latest" "$DIR" $(env | grep -E '_(proxy|REPO|VER)=' | sed 's/^/--build-arg /') fi done diff --git a/script/shell.sh b/script/shell.sh index 8617ddf..084abe0 100644 --- a/script/shell.sh +++ b/script/shell.sh @@ -5,16 +5,16 @@ if test -z "${DIR}"; then exit -1 fi -pid="$(sudo docker ps -f ancestor=$IMAGE --format='{{.ID}}' | head -n 1)" +pid="$(docker ps -f ancestor=$IMAGE --format='{{.ID}}' | head -n 1)" if [ -n "$pid" ] && [ "$#" -le "1" ]; then echo "bash into running container...$IMAGE" - sudo docker exec -it $pid ${*-/bin/bash} + docker exec -it $pid ${*-/bin/bash} else echo "bash into new container...$IMAGE" if test -z "$DOCKERFILE"; then DOCKERFILE="${DIR}/Dockerfile" fi args=("$@") - sudo docker run --network=host ${OPTIONS[@]} $(env | grep -E '_(proxy)=' | sed 's/^/-e /') $(grep '^ARG .*=' "$DOCKERFILE" | sed 's/^ARG /-e /') --entrypoint 
${1:-/bin/bash} -it "${IMAGE}" ${args[@]:1} + docker run --network=host ${OPTIONS[@]} $(env | grep -E '_(proxy)=' | sed 's/^/-e /') $(grep '^ARG .*=' "$DOCKERFILE" | sed 's/^ARG /-e /') --entrypoint ${1:-/bin/bash} -it "${IMAGE}" ${args[@]:1} fi From 3dfafb7af4ed5c5776fdc428002f6543d847294c Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Wed, 13 May 2020 10:26:30 -0700 Subject: [PATCH 04/91] fix deliminator --- deployment/kubernetes/run_with_command.py | 2 +- deployment/kubernetes/start.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deployment/kubernetes/run_with_command.py b/deployment/kubernetes/run_with_command.py index fb80064..ec9927d 100755 --- a/deployment/kubernetes/run_with_command.py +++ b/deployment/kubernetes/run_with_command.py @@ -205,7 +205,7 @@ def get_config(config_file): config_dict[k] = dict(config_dict[k]) return config_dict -sw_node_name_list = sys.argv[4].split(" ") +sw_node_name_list = sys.argv[4].split("\n") node_num=len(sw_node_name_list) sw_node_name_list = list(filter(None, sw_node_name_list)) hw_node_name_list = copy.deepcopy(sw_node_name_list) diff --git a/deployment/kubernetes/start.sh b/deployment/kubernetes/start.sh index 327d1fd..90af793 100755 --- a/deployment/kubernetes/start.sh +++ b/deployment/kubernetes/start.sh @@ -68,7 +68,7 @@ sudo mkdir -p "${NGINX_LOG_VOLUME}" NVODS="${2:-1}" NLIVES="${3:-1}" echo "Generating yamls with NVODS=${NVODS}, NLIVES=${NLIVES}" -NODES=$(kubectl get node | awk '{print $1}' | sed -n '2, $p') +NODES="$(kubectl get node | awk '{print $1}' | sed -n '2, $p')" DESCRIPTIONS="$(kubectl get node --no-headers -o custom-columns=NAME:metadata.name,CPU:status.capacity.cpu,MEM:status.capacity.memory)" "$DIR/run_with_command.py" "$DIR" ${NVODS} ${NLIVES} "$NODES" "$DESCRIPTIONS" From ded963d7599a6fa0fe5b17e2b225ae75ea19358f Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Fri, 15 May 2020 20:50:43 -0700 
Subject: [PATCH 05/91] cdn working in helm --- deployment/certificate/Dockerfile | 10 ++++ deployment/kubernetes/CMakeLists.txt | 1 + deployment/kubernetes/helm/.gitignore | 2 + deployment/kubernetes/helm/CMakeLists.txt | 4 ++ deployment/kubernetes/helm/build.sh | 28 ++++++++++ .../kubernetes/helm/cdn-transcode/Chart.yaml | 10 ++++ .../templates/cdn-service-deployment.yaml | 43 +++++++++++++++ .../templates/cdn-service-service.yaml | 18 ++++++ .../cdn-transcode/templates/html-pvc.yaml | 13 +++++ .../templates/kafka-service-deployment.yaml | 55 +++++++++++++++++++ .../templates/kafka-service-service.yaml | 13 +++++ .../templates/live-service-deployment.yaml | 22 ++++++++ .../templates/redis-service-deployment.yaml | 25 +++++++++ .../templates/redis-service-service.yaml | 13 +++++ .../templates/vod-service-deployment.yaml | 26 +++++++++ .../zookeeper-service-deployment.yaml | 38 +++++++++++++ .../templates/zookeeper-service-service.yaml | 13 +++++ .../helm/cdn-transcode/values.yaml.m4 | 19 +++++++ deployment/kubernetes/helm/html-pv.yaml.m4 | 31 +++++++++++ deployment/kubernetes/helm/mkvolume.sh | 35 ++++++++++++ deployment/kubernetes/helm/start.sh | 14 +++++ deployment/kubernetes/helm/stop.sh | 12 ++++ script/build.sh | 4 +- 23 files changed, 447 insertions(+), 2 deletions(-) create mode 100644 deployment/kubernetes/helm/.gitignore create mode 100644 deployment/kubernetes/helm/CMakeLists.txt create mode 100755 deployment/kubernetes/helm/build.sh create mode 100644 deployment/kubernetes/helm/cdn-transcode/Chart.yaml create mode 100644 deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml create mode 100644 deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-service.yaml create mode 100644 deployment/kubernetes/helm/cdn-transcode/templates/html-pvc.yaml create mode 100644 deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-deployment.yaml create mode 100644 
deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-service.yaml create mode 100644 deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml create mode 100644 deployment/kubernetes/helm/cdn-transcode/templates/redis-service-deployment.yaml create mode 100644 deployment/kubernetes/helm/cdn-transcode/templates/redis-service-service.yaml create mode 100644 deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml create mode 100644 deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-deployment.yaml create mode 100644 deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-service.yaml create mode 100644 deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 create mode 100644 deployment/kubernetes/helm/html-pv.yaml.m4 create mode 100755 deployment/kubernetes/helm/mkvolume.sh create mode 100755 deployment/kubernetes/helm/start.sh create mode 100755 deployment/kubernetes/helm/stop.sh diff --git a/deployment/certificate/Dockerfile b/deployment/certificate/Dockerfile index 8d5adcd..ddd0736 100644 --- a/deployment/certificate/Dockerfile +++ b/deployment/certificate/Dockerfile @@ -1,3 +1,13 @@ FROM ubuntu:18.04 RUN apt-get update && apt-get install -y openssh-server + +#### +ARG UID +ARG GID +## must use ; here to ignore user exist status code +RUN [ ${GID} -gt 0 ] && groupadd -f -g ${GID} docker; \ + [ ${UID} -gt 0 ] && useradd -d /home/docker -g ${GID} -K UID_MAX=${UID} -K UID_MIN=${UID} docker; \ + echo +USER ${UID} +#### diff --git a/deployment/kubernetes/CMakeLists.txt b/deployment/kubernetes/CMakeLists.txt index a0b76ba..74aa1cd 100644 --- a/deployment/kubernetes/CMakeLists.txt +++ b/deployment/kubernetes/CMakeLists.txt @@ -1,3 +1,4 @@ set(service "kubernetes") include("${CMAKE_SOURCE_DIR}/script/service.cmake") include("${CMAKE_SOURCE_DIR}/script/deployment.cmake") +include("${CMAKE_SOURCE_DIR}/script/scan-all.cmake") diff --git a/deployment/kubernetes/helm/.gitignore 
b/deployment/kubernetes/helm/.gitignore new file mode 100644 index 0000000..d237d92 --- /dev/null +++ b/deployment/kubernetes/helm/.gitignore @@ -0,0 +1,2 @@ +cdn-transcode/values.yaml +*-pv.yaml diff --git a/deployment/kubernetes/helm/CMakeLists.txt b/deployment/kubernetes/helm/CMakeLists.txt new file mode 100644 index 0000000..a20f9ba --- /dev/null +++ b/deployment/kubernetes/helm/CMakeLists.txt @@ -0,0 +1,4 @@ +set(service "helm") +include("${CMAKE_SOURCE_DIR}/script/service.cmake") +include("${CMAKE_SOURCE_DIR}/script/deployment.cmake") +add_custom_target(volume ${CMAKE_CURRENT_SOURCE_DIR}/mkvolume.sh) diff --git a/deployment/kubernetes/helm/build.sh b/deployment/kubernetes/helm/build.sh new file mode 100755 index 0000000..6d2daa5 --- /dev/null +++ b/deployment/kubernetes/helm/build.sh @@ -0,0 +1,28 @@ +#!/bin/bash -e + +DIR=$(dirname $(readlink -f "$0")) +NVODS="${1:-1}" +NLIVES="${2:-1}" +HOSTIP=$(ip route get 8.8.8.8 | awk '/ src /{split(substr($0,index($0," src ")),f);print f[2];exit}') + +echo "Generating persistent volume yaml(s)" +# list all workers +hosts=($(kubectl get node -l vcac-zone!=yes -o custom-columns=NAME:metadata.name,STATUS:status.conditions[-1].type,TAINT:spec.taints | grep " Ready " | grep -v "NoSchedule" | cut -f1 -d' ')) +if test ${#hosts[@]} -eq 0; then + printf "\nFailed to locate worker node(s) for shared storage\n\n" + exit 1 +elif test ${#hosts[@]} -lt 2; then + hosts=(${hosts[0]} ${hosts[0]}) +fi + +export HTML_VOLUME_PATH=/tmp/volume/html +export HTML_VOLUME_SIZE=1Gi +export HTML_VOLUME_HOST=${hosts[0]} + +for pv in "$DIR"/*-pv.yaml.m4; do + m4 $(env | grep _VOLUME_ | sed 's/^/-D/') "$pv" > "${pv/.m4/}" +done + +echo "Generating helm chart" +m4 -DNVODS=${NVODS} -DNLIVES=${NLIVES} -DUSERID=$(id -u) -DGROUPID=$(id -g) -DHOSTIP=${HOSTIP} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}/cdn-transcode" "$DIR/cdn-transcode/values.yaml.m4" > "$DIR/cdn-transcode/values.yaml" + diff --git 
a/deployment/kubernetes/helm/cdn-transcode/Chart.yaml b/deployment/kubernetes/helm/cdn-transcode/Chart.yaml new file mode 100644 index 0000000..0be8f4a --- /dev/null +++ b/deployment/kubernetes/helm/cdn-transcode/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +appVersion: 0.1.0 +description: A Helm chart for the CDN Transcode sample +home: https://github.com/OpenVisualCloud/CDN-Transocde-Sample +icon: https://raw.githubusercontent.com/OpenVisualCloud/CDN-Transcode-Sample/master/volume/html/favicon.ico +name: cdn-transcode-sample +sources: +- https://github.com/OpenVisualCloud/CDN-Transcode-Sample +type: application +version: 0.1.0 diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml new file mode 100644 index 0000000..aada6b1 --- /dev/null +++ b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: cdn-service + name: cdn-service +spec: + selector: + matchLabels: + app: cdn-service + replicas: 1 + template: + metadata: + creationTimestamp: null + labels: + app: cdn-service + spec: + containers: + - args: + - bash + - -c + - /home/main.py&/usr/local/sbin/nginx + image: ovc_cdn_service:latest + imagePullPolicy: IfNotPresent + name: cdn-service + ports: + - containerPort: 8080 + - containerPort: 1935 + volumeMounts: + - mountPath: /var/run/secrets + name: secrets + readOnly: true + - mountPath: /var/www/html + name: html + readOnly: true + volumes: + - name: secrets + secret: + secretName: self-signed-certificate + - name: html + persistentVolumeClaim: + claimName: html + restartPolicy: Always diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-service.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-service.yaml new file mode 100644 index 0000000..33523f1 --- /dev/null +++ 
b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: cdn-service + name: cdn-service +spec: + ports: + - name: "443" + port: 443 + targetPort: 8080 + - name: "1935" + port: 1935 + targetPort: 1935 + externalIPs: + - "{{ .Values.cdn.hostIP }}" + selector: + app: cdn-service diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/html-pvc.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/html-pvc.yaml new file mode 100644 index 0000000..4555f4c --- /dev/null +++ b/deployment/kubernetes/helm/cdn-transcode/templates/html-pvc.yaml @@ -0,0 +1,13 @@ + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: html +spec: + accessModes: + - ReadOnlyMany + storageClassName: html + resources: + requests: + storage: "{{ .Values.volume.html.size }}" + diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-deployment.yaml new file mode 100644 index 0000000..e7ddb3e --- /dev/null +++ b/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-deployment.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: kafka-service + name: kafka-service +spec: + selector: + matchLabels: + app: kafka-service + replicas: 1 + template: + metadata: + labels: + app: kafka-service + spec: + containers: + - env: + - name: KAFKA_ADVERTISED_HOST_NAME + value: kafka-service + - name: KAFKA_ADVERTISED_LISTENERS + value: PLAINTEXT://kafka-service:9092 + - name: KAFKA_ADVERTISED_PORT + value: "9092" + - name: KAFKA_AUTO_CREATE_TOPICS_ENABLE + value: "true" + - name: KAFKA_BROKER_ID + value: "1" + - name: KAFKA_CREATE_TOPICS + value: content_provider_sched:16:1 + - name: KAFKA_DEFAULT_REPLICATION_FACTOR + value: "1" + - name: KAFKA_HEAP_OPTS + value: -Xmx{{ .Values.kafka.heapSize }} -Xms{{ .Values.kafka.heapSize }} + - name: 
KAFKA_INTER_BROKER_LISTENER_NAME + value: PLAINTEXT + - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP + value: PLAINTEXT:PLAINTEXT + - name: KAFKA_LOG4J_LOGGERS + value: kafka=ERROR,kafka.controller=ERROR,state.change.logger=ERROR,org.apache.kafka=ERROR + - name: KAFKA_LOG4J_ROOT_LOGLEVEL + value: ERROR + - name: KAFKA_LOG_RETENTION_HOURS + value: "8" + - name: KAFKA_NUM_PARTITIONS + value: "16" + - name: KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR + value: "1" + - name: KAFKA_ZOOKEEPER_CONNECT + value: zookeeper-service:2181 + image: wurstmeister/kafka:latest + name: kafka-service + ports: + - containerPort: 9092 + restartPolicy: Always diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-service.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-service.yaml new file mode 100644 index 0000000..45940ca --- /dev/null +++ b/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: kafka-service + name: kafka-service +spec: + ports: + - name: "9092" + port: 9092 + targetPort: 9092 + selector: + app: kafka-service diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml new file mode 100644 index 0000000..923255a --- /dev/null +++ b/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: live-service + name: live-service +spec: + selector: + matchLabels: + app: live-service + replicas: {{ .Values.liveTranscode.replicas }} + template: + metadata: + creationTimestamp: null + labels: + app: live-service + spec: + containers: + - image: ovc_software_transcode_service:latest + imagePullPolicy: IfNotPresent + name: live-service + restartPolicy: Always diff --git 
a/deployment/kubernetes/helm/cdn-transcode/templates/redis-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/redis-service-deployment.yaml new file mode 100644 index 0000000..ce7efa0 --- /dev/null +++ b/deployment/kubernetes/helm/cdn-transcode/templates/redis-service-deployment.yaml @@ -0,0 +1,25 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: redis-service + name: redis-service +spec: + selector: + matchLabels: + app: redis-service + replicas: 1 + template: + metadata: + creationTimestamp: null + labels: + app: redis-service + spec: + containers: + - args: + - redis-server + image: redis:latest + name: redis-service + ports: + - containerPort: 6379 + restartPolicy: Always diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/redis-service-service.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/redis-service-service.yaml new file mode 100644 index 0000000..22bd6ad --- /dev/null +++ b/deployment/kubernetes/helm/cdn-transcode/templates/redis-service-service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: redis-service + name: redis-service +spec: + ports: + - name: "6379" + port: 6379 + targetPort: 6379 + selector: + app: redis-service diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml new file mode 100644 index 0000000..8182084 --- /dev/null +++ b/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml @@ -0,0 +1,26 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: vod-service + name: vod-service +spec: + selector: + matchLabels: + app: vod-service + replicas: {{ .Values.vodTranscode.replicas }} + template: + metadata: + creationTimestamp: null + labels: + app: vod-service + spec: + containers: + - args: + - bash + - -c + - /home/main.py + image: ovc_software_transcode_service:latest + imagePullPolicy: 
IfNotPresent + name: vod-service + restartPolicy: Always diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-deployment.yaml new file mode 100644 index 0000000..4919bf5 --- /dev/null +++ b/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-deployment.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: zookeeper-service + name: zookeeper-service +spec: + selector: + matchLabels: + app: zookeeper-service + replicas: 1 + template: + metadata: + creationTimestamp: null + labels: + app: zookeeper-service + spec: + containers: + - env: + - name: ZOOKEEPER_CLIENT_PORT + value: "2181" + - name: ZOOKEEPER_HEAP_OPTS + value: -Xmx{{ .Values.zookeeper.heapSize }} -Xms{{ .Values.zookeeper.heapSize }} + - name: ZOOKEEPER_LOG4J_LOGGERS + value: zookeepr=ERROR + - name: ZOOKEEPER_LOG4J_ROOT_LOGLEVEL + value: ERROR + - name: ZOOKEEPER_MAX_CLIENT_CNXNS + value: "20000" + - name: ZOOKEEPER_SERVER_ID + value: "1" + - name: ZOOKEEPER_TICK_TIME + value: "2000" + image: zookeeper:latest + name: zookeeper-service + ports: + - containerPort: 2181 + restartPolicy: Always diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-service.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-service.yaml new file mode 100644 index 0000000..acdf70d --- /dev/null +++ b/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: zookeeper-service + name: zookeeper-service +spec: + ports: + - name: "2181" + port: 2181 + targetPort: 2181 + selector: + app: zookeeper-service diff --git a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 new file mode 100644 index 0000000..e05da4e --- /dev/null +++ 
b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 @@ -0,0 +1,19 @@ + +zookeeper: + heapSize: 1024m + +kafka: + heapSize: 1024m + +liveTranscode: + replicas: defn(`NLIVES') + +vodTranscode: + replicas: defn(`NVODS') + +cdn: + hostIP: defn(`HOSTIP') + +volume: + html: + size: defn(`HTML_VOLUME_SIZE') diff --git a/deployment/kubernetes/helm/html-pv.yaml.m4 b/deployment/kubernetes/helm/html-pv.yaml.m4 new file mode 100644 index 0000000..2c3c3b7 --- /dev/null +++ b/deployment/kubernetes/helm/html-pv.yaml.m4 @@ -0,0 +1,31 @@ + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: html +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer + +--- + +apiVersion: v1 +kind: PersistentVolume +metadata: + name: html +spec: + capacity: + storage: defn(`HTML_VOLUME_SIZE') + accessModes: + - ReadOnlyMany + persistentVolumeReclaimPolicy: Retain + storageClassName: html + local: + path: defn(`HTML_VOLUME_PATH') + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - "defn(`HTML_VOLUME_HOST')" diff --git a/deployment/kubernetes/helm/mkvolume.sh b/deployment/kubernetes/helm/mkvolume.sh new file mode 100755 index 0000000..f2e6c20 --- /dev/null +++ b/deployment/kubernetes/helm/mkvolume.sh @@ -0,0 +1,35 @@ +#!/bin/bash -e + +DIR=$(dirname $(readlink -f "$0")) + +echo "Making volumes..." 
+HOSTS=$(kubectl get node -o 'custom-columns=NAME:.status.addresses[?(@.type=="Hostname")].address,IP:.status.addresses[?(@.type=="InternalIP")].address' | awk '!/NAME/{print $1":"$2}') +awk -v DIR="$DIR" -v HOSTS="$HOSTS" ' +BEGIN{ + split(HOSTS,tmp1," "); + for (i in tmp1) { + split(tmp1[i],tmp2,":"); + host2ip[tmp2[1]]=tmp2[2]; + } +} +/name:/ { + gsub("-","/",$2) + content="\""DIR"/../../../volume/"$2"\"" +} +/path:/ { + path=$2 +} +/- ".*"/ { + host=host2ip[substr($2,2,length($2)-2)]; + paths[host][path]=1; + contents[host][path]=content +} +END { + for (host in paths) { + for (path in paths[host]) { + system("ssh "host" \"mkdir -p "path";find "path" -mindepth 1 -maxdepth 1 -exec rm -rf {} \\\\;\""); + system("scp -r "contents[host][path]"/* "host":"path); + } + } +} +' "$DIR"/*-pv.yaml diff --git a/deployment/kubernetes/helm/start.sh b/deployment/kubernetes/helm/start.sh new file mode 100755 index 0000000..980d22c --- /dev/null +++ b/deployment/kubernetes/helm/start.sh @@ -0,0 +1,14 @@ +#!/bin/bash -e + +DIR=$(dirname $(readlink -f "$0")) + +function create_secret { + kubectl create secret generic self-signed-certificate "--from-file=${DIR}/../../certificate/self.crt" "--from-file=${DIR}/../../certificate/self.key" +} + +# create secrets +"$DIR/../../certificate/self-sign.sh" +create_secret 2>/dev/null || (kubectl delete secret self-signed-certificate; create_secret) + +kubectl apply -f "$DIR"/*-pv.yaml +helm install smtc "$DIR/cdn-transcode" diff --git a/deployment/kubernetes/helm/stop.sh b/deployment/kubernetes/helm/stop.sh new file mode 100755 index 0000000..77e96cf --- /dev/null +++ b/deployment/kubernetes/helm/stop.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +DIR=$(dirname $(readlink -f "$0")) + +helm uninstall smtc + +# delete pvs and scs +for yaml in $(find "${DIR}" -maxdepth 1 -name "*-pv.yaml" -print); do + kubectl delete -f "$yaml" --ignore-not-found=true 2>/dev/null +done + +kubectl delete secret self-signed-certificate 2> /dev/null || echo -n "" diff --git 
a/script/build.sh b/script/build.sh index 8904582..abf7d97 100644 --- a/script/build.sh +++ b/script/build.sh @@ -12,9 +12,9 @@ for dep in .8 .7 .6 .5 .4 .3 .2 .1 ''; do if test -z "$dep"; then image="$IMAGE"; fi if grep -q 'AS build' "${DIR}/Dockerfile$dep"; then - docker build --network=host --file="${DIR}/Dockerfile$dep" --target build -t "$image:build" "$DIR" $(env | grep -E '_(proxy|REPO|VER)=' | sed 's/^/--build-arg /') + docker build --network=host --file="${DIR}/Dockerfile$dep" --target build -t "$image:build" "$DIR" $(env | grep -E '_(proxy|REPO|VER)=' | sed 's/^/--build-arg /') --build-arg UID=$(id -u) --build-arg GID=$(id -g) fi - docker build --network=host --file="${DIR}/Dockerfile$dep" -t "$image:latest" "$DIR" $(env | grep -E '_(proxy|REPO|VER)=' | sed 's/^/--build-arg /') + docker build --network=host --file="${DIR}/Dockerfile$dep" -t "$image:latest" "$DIR" $(env | grep -E '_(proxy|REPO|VER)=' | sed 's/^/--build-arg /') --build-arg UID=$(id -u) --build-arg GID=$(id -g) fi done From 39d0c9f28752933a473dfd0fe3c5962cbe8e517e Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Fri, 15 May 2020 21:39:19 -0700 Subject: [PATCH 06/91] vod working --- deployment/kubernetes/helm/build.sh | 12 +++++++ .../templates/cdn-service-deployment.yaml | 16 ++++++++++ .../templates/video-archive-pvc.yaml | 13 ++++++++ .../templates/video-dash-pvc.yaml | 13 ++++++++ .../templates/video-hls-pvc.yaml | 13 ++++++++ .../templates/vod-service-deployment.yaml | 18 +++++++++++ .../helm/cdn-transcode/values.yaml.m4 | 7 +++++ deployment/kubernetes/helm/start.sh | 4 ++- .../kubernetes/helm/video-archive-pv.yaml.m4 | 31 +++++++++++++++++++ .../kubernetes/helm/video-dash-pv.yaml.m4 | 31 +++++++++++++++++++ .../kubernetes/helm/video-hls-pv.yaml.m4 | 31 +++++++++++++++++++ 11 files changed, 188 insertions(+), 1 deletion(-) create mode 100644 deployment/kubernetes/helm/cdn-transcode/templates/video-archive-pvc.yaml create mode 100644 
deployment/kubernetes/helm/cdn-transcode/templates/video-dash-pvc.yaml create mode 100644 deployment/kubernetes/helm/cdn-transcode/templates/video-hls-pvc.yaml create mode 100644 deployment/kubernetes/helm/video-archive-pv.yaml.m4 create mode 100644 deployment/kubernetes/helm/video-dash-pv.yaml.m4 create mode 100644 deployment/kubernetes/helm/video-hls-pv.yaml.m4 diff --git a/deployment/kubernetes/helm/build.sh b/deployment/kubernetes/helm/build.sh index 6d2daa5..ec3881a 100755 --- a/deployment/kubernetes/helm/build.sh +++ b/deployment/kubernetes/helm/build.sh @@ -19,6 +19,18 @@ export HTML_VOLUME_PATH=/tmp/volume/html export HTML_VOLUME_SIZE=1Gi export HTML_VOLUME_HOST=${hosts[0]} +export ARCHIVE_VOLUME_PATH=/tmp/volume/video/archive +export ARCHIVE_VOLUME_SIZE=1Gi +export ARCHIVE_VOLUME_HOST=${hosts[0]} + +export DASH_VOLUME_PATH=/tmp/volume/video/dash +export DASH_VOLUME_SIZE=1Gi +export DASH_VOLUME_HOST=${hosts[1]} + +export HLS_VOLUME_PATH=/tmp/volume/video/hls +export HLS_VOLUME_SIZE=1Gi +export HLS_VOLUME_HOST=${hosts[1]} + for pv in "$DIR"/*-pv.yaml.m4; do m4 $(env | grep _VOLUME_ | sed 's/^/-D/') "$pv" > "${pv/.m4/}" done diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml index aada6b1..edef38f 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml @@ -33,6 +33,13 @@ spec: - mountPath: /var/www/html name: html readOnly: true + - mountPath: /var/www/archive + name: archive + readOnly: true + - mountPath: /var/www/dash + name: dash + - mountPath: /var/www/hls + name: hls volumes: - name: secrets secret: @@ -40,4 +47,13 @@ spec: - name: html persistentVolumeClaim: claimName: html + - name: archive + persistentVolumeClaim: + claimName: video-archive + - name: dash + persistentVolumeClaim: + claimName: video-dash + - 
name: hls + persistentVolumeClaim: + claimName: video-hls restartPolicy: Always diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/video-archive-pvc.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/video-archive-pvc.yaml new file mode 100644 index 0000000..3056693 --- /dev/null +++ b/deployment/kubernetes/helm/cdn-transcode/templates/video-archive-pvc.yaml @@ -0,0 +1,13 @@ + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: video-archive +spec: + accessModes: + - ReadOnlyMany + storageClassName: video-archive + resources: + requests: + storage: "{{ .Values.volume.video.archive.size }}" + diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/video-dash-pvc.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/video-dash-pvc.yaml new file mode 100644 index 0000000..101c890 --- /dev/null +++ b/deployment/kubernetes/helm/cdn-transcode/templates/video-dash-pvc.yaml @@ -0,0 +1,13 @@ + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: video-dash +spec: + accessModes: + - ReadWriteMany + storageClassName: video-dash + resources: + requests: + storage: "{{ .Values.volume.video.dash.size }}" + diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/video-hls-pvc.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/video-hls-pvc.yaml new file mode 100644 index 0000000..764066e --- /dev/null +++ b/deployment/kubernetes/helm/cdn-transcode/templates/video-hls-pvc.yaml @@ -0,0 +1,13 @@ + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: video-hls +spec: + accessModes: + - ReadWriteMany + storageClassName: video-hls + resources: + requests: + storage: "{{ .Values.volume.video.hls.size }}" + diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml index 8182084..db03351 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml +++ 
b/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml @@ -23,4 +23,22 @@ spec: image: ovc_software_transcode_service:latest imagePullPolicy: IfNotPresent name: vod-service + volumeMounts: + - mountPath: /var/www/archive + name: archive + readOnly: true + - mountPath: /var/www/dash + name: dash + - mountPath: /var/www/hls + name: hls + volumes: + - name: archive + persistentVolumeClaim: + claimName: video-archive + - name: dash + persistentVolumeClaim: + claimName: video-dash + - name: hls + persistentVolumeClaim: + claimName: video-hls restartPolicy: Always diff --git a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 index e05da4e..c3877ad 100644 --- a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 +++ b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 @@ -17,3 +17,10 @@ cdn: volume: html: size: defn(`HTML_VOLUME_SIZE') + video: + archive: + size: defn(`ARCHIVE_VOLUME_SIZE') + dash: + size: defn(`DASH_VOLUME_SIZE') + hls: + size: defn(`HLS_VOLUME_SIZE') diff --git a/deployment/kubernetes/helm/start.sh b/deployment/kubernetes/helm/start.sh index 980d22c..b07f002 100755 --- a/deployment/kubernetes/helm/start.sh +++ b/deployment/kubernetes/helm/start.sh @@ -10,5 +10,7 @@ function create_secret { "$DIR/../../certificate/self-sign.sh" create_secret 2>/dev/null || (kubectl delete secret self-signed-certificate; create_secret) -kubectl apply -f "$DIR"/*-pv.yaml +for yaml in $(find "$DIR" -maxdepth 1 -name "*-pv.yaml" -print); do + kubectl apply -f "$yaml" +done helm install smtc "$DIR/cdn-transcode" diff --git a/deployment/kubernetes/helm/video-archive-pv.yaml.m4 b/deployment/kubernetes/helm/video-archive-pv.yaml.m4 new file mode 100644 index 0000000..93f4404 --- /dev/null +++ b/deployment/kubernetes/helm/video-archive-pv.yaml.m4 @@ -0,0 +1,31 @@ + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: video-archive +provisioner: 
kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer + +--- + +apiVersion: v1 +kind: PersistentVolume +metadata: + name: video-archive +spec: + capacity: + storage: defn(`ARCHIVE_VOLUME_SIZE') + accessModes: + - ReadOnlyMany + persistentVolumeReclaimPolicy: Retain + storageClassName: video-archive + local: + path: defn(`ARCHIVE_VOLUME_PATH') + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - "defn(`ARCHIVE_VOLUME_HOST')" diff --git a/deployment/kubernetes/helm/video-dash-pv.yaml.m4 b/deployment/kubernetes/helm/video-dash-pv.yaml.m4 new file mode 100644 index 0000000..e01a117 --- /dev/null +++ b/deployment/kubernetes/helm/video-dash-pv.yaml.m4 @@ -0,0 +1,31 @@ + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: video-dash +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer + +--- + +apiVersion: v1 +kind: PersistentVolume +metadata: + name: video-dash +spec: + capacity: + storage: defn(`DASH_VOLUME_SIZE') + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Retain + storageClassName: video-dash + local: + path: defn(`DASH_VOLUME_PATH') + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - "defn(`DASH_VOLUME_HOST')" diff --git a/deployment/kubernetes/helm/video-hls-pv.yaml.m4 b/deployment/kubernetes/helm/video-hls-pv.yaml.m4 new file mode 100644 index 0000000..d95e90b --- /dev/null +++ b/deployment/kubernetes/helm/video-hls-pv.yaml.m4 @@ -0,0 +1,31 @@ + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: video-hls +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer + +--- + +apiVersion: v1 +kind: PersistentVolume +metadata: + name: video-hls +spec: + capacity: + storage: defn(`HLS_VOLUME_SIZE') + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Retain + 
storageClassName: video-hls + local: + path: defn(`HLS_VOLUME_PATH') + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - "defn(`HLS_VOLUME_HOST')" From ba5eec0434696bc59d0e0476988e04e1a1998c3d Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Fri, 15 May 2020 21:53:47 -0700 Subject: [PATCH 07/91] added live --- .../cdn-transcode/templates/live-service-deployment.yaml | 9 +++++++++ deployment/kubernetes/helm/start.sh | 2 +- deployment/kubernetes/helm/stop.sh | 2 +- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml index 923255a..693ad6d 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml @@ -17,6 +17,15 @@ spec: spec: containers: - image: ovc_software_transcode_service:latest + command: ["/bin/bash","-c","ffmpeg -re -stream_loop -1 -i /var/www/archive/bbb_sunflower_1080p_30fps_normal.mp4 -vf scale=2560:1440 -c:v libsvt_hevc -b:v 15M -forced-idr 1 -f flv rtmp://cdn-service/hls/big_buck_bunny_2560x1440 -vf scale=1920:1080 -c:v libsvt_hevc -b:v 10M -forced-idr 1 -f flv rtmp://cdn-service/hls/big_buck_bunny_1920x1080 -vf scale=1280:720 -c:v libx264 -b:v 8M -f flv rtmp://cdn-service/hls/big_buck_bunny_1280x720 -vf scale=854:480 -c:v libx264 -b:v 6M -f flv rtmp://cdn-service/hls/big_buck_bunny_854x480 -abr_pipeline"] imagePullPolicy: IfNotPresent name: live-service + volumeMounts: + - mountPath: /var/www/archive + name: archive + readOnly: true + volumes: + - name: archive + persistentVolumeClaim: + claimName: video-archive restartPolicy: Always diff --git a/deployment/kubernetes/helm/start.sh b/deployment/kubernetes/helm/start.sh index b07f002..61d931b 100755 
--- a/deployment/kubernetes/helm/start.sh +++ b/deployment/kubernetes/helm/start.sh @@ -13,4 +13,4 @@ create_secret 2>/dev/null || (kubectl delete secret self-signed-certificate; cre for yaml in $(find "$DIR" -maxdepth 1 -name "*-pv.yaml" -print); do kubectl apply -f "$yaml" done -helm install smtc "$DIR/cdn-transcode" +helm install cdn-transcode "$DIR/cdn-transcode" diff --git a/deployment/kubernetes/helm/stop.sh b/deployment/kubernetes/helm/stop.sh index 77e96cf..dd20555 100755 --- a/deployment/kubernetes/helm/stop.sh +++ b/deployment/kubernetes/helm/stop.sh @@ -2,7 +2,7 @@ DIR=$(dirname $(readlink -f "$0")) -helm uninstall smtc +helm uninstall cdn-transcode # delete pvs and scs for yaml in $(find "${DIR}" -maxdepth 1 -name "*-pv.yaml" -print); do From baee66c62cf1000a139fef3ba48c494184c81814 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Fri, 15 May 2020 22:19:46 -0700 Subject: [PATCH 08/91] add README --- .../kubernetes/helm/cdn-transcode/README.md | 33 +++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 deployment/kubernetes/helm/cdn-transcode/README.md diff --git a/deployment/kubernetes/helm/cdn-transcode/README.md b/deployment/kubernetes/helm/cdn-transcode/README.md new file mode 100644 index 0000000..c79dc99 --- /dev/null +++ b/deployment/kubernetes/helm/cdn-transcode/README.md @@ -0,0 +1,33 @@ + +The CDN Transcode Sample is an Open Visual Cloud software stack with all required open source ingredients well integrated to provide out-of-the-box CDN media transcode service, including live streaming and video on demand. It also provides a docker-based media delivery software development environment upon which developers can easily build their specific applications. + +### Prerequisites: + +The Sample assumes that you have a ready-to-use Kubernetes cluster environment with `helm` to manage the application deployment. + +### Build: + +```bash +mkdir build +cd build +cmake ..
+make +``` + +### Create Shared Volumes: + +```bash +make volume +``` + +The `make volume` command creates local persistent volumes under the /tmp directory of the first two Kubernetes workers. This is a temporary solution for quick sample deployment. For scalability beyond a two-node cluster, consider rewriting the `mkvolume.sh` script. + +`make volume` uses `scp` to copy volumes to the Kubernetes workers, assuming that the Kubernetes master has password-less access to the Kubernetes workers. + +### Start/Stop Sample: + +```bash +make start_helm +make stop_helm +``` + From 1a723b47706ef8cfde299c3ffd79d753e4dfde45 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Sat, 16 May 2020 08:56:12 -0700 Subject: [PATCH 09/91] no wait in kubectl delete --- deployment/kubernetes/helm/stop.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/kubernetes/helm/stop.sh b/deployment/kubernetes/helm/stop.sh index dd20555..63ee139 100755 --- a/deployment/kubernetes/helm/stop.sh +++ b/deployment/kubernetes/helm/stop.sh @@ -6,7 +6,7 @@ helm uninstall cdn-transcode # delete pvs and scs for yaml in $(find "${DIR}" -maxdepth 1 -name "*-pv.yaml" -print); do - kubectl delete -f "$yaml" --ignore-not-found=true 2>/dev/null + kubectl delete --wait=false -f "$yaml" --ignore-not-found=true 2>/dev/null done kubectl delete secret self-signed-certificate 2> /dev/null || echo -n "" From 0ca6e005f50042ae88556b2f71686b9f3e8c53d6 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Sat, 16 May 2020 16:34:16 -0700 Subject: [PATCH 10/91] add more configuration options --- .../templates/live-service-deployment.yaml | 45 ++++++++++++++++--- .../helm/cdn-transcode/values.yaml.m4 | 21 +++++++++ 2 files changed, 59 insertions(+), 7 deletions(-) diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml 
b/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml index 693ad6d..59e13f8 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml @@ -1,25 +1,52 @@ + +{{- range $i,$v1 := .Values.liveTranscode.streams }} +{{- if lt (int $i) (int $.Values.liveTranscode.replicas) }} + apiVersion: apps/v1 kind: Deployment metadata: labels: - app: live-service - name: live-service + app: live-service-{{ $i }} + name: live-service-{{ $i }} spec: selector: matchLabels: - app: live-service - replicas: {{ .Values.liveTranscode.replicas }} + app: live-service-{{ $i }} + replicas: 1 template: metadata: creationTimestamp: null labels: - app: live-service + app: live-service-{{ $i }} spec: containers: - image: ovc_software_transcode_service:latest - command: ["/bin/bash","-c","ffmpeg -re -stream_loop -1 -i /var/www/archive/bbb_sunflower_1080p_30fps_normal.mp4 -vf scale=2560:1440 -c:v libsvt_hevc -b:v 15M -forced-idr 1 -f flv rtmp://cdn-service/hls/big_buck_bunny_2560x1440 -vf scale=1920:1080 -c:v libsvt_hevc -b:v 10M -forced-idr 1 -f flv rtmp://cdn-service/hls/big_buck_bunny_1920x1080 -vf scale=1280:720 -c:v libx264 -b:v 8M -f flv rtmp://cdn-service/hls/big_buck_bunny_1280x720 -vf scale=854:480 -c:v libx264 -b:v 6M -f flv rtmp://cdn-service/hls/big_buck_bunny_854x480 -abr_pipeline"] imagePullPolicy: IfNotPresent - name: live-service + command: ["/usr/local/bin/ffmpeg","-re","-stream_loop","-1", + "-i","{{ .name }}", +{{- range $k,$v2 := .transcode }} + "-vf","scale={{ .scale }}", + "-c:v","{{ .encoderType }}", + "-b:v","{{ .bitrate }}", + "-r","{{ .framerate }}", + "-g","{{ .gop }}", + "-bf","{{ .maxbframes }}", + "-refs","{{ .refsNum }}", + "-preset","{{ .preset }}", + "-forced-idr","1", +{{- if eq ( hasPrefix "libsvt" .encoderType ) true }} + "-thread_count","96", +{{- end }} + "-an", + "-f","flv","rtmp://cdn-service/{{ .protocol 
}}/media_{{ $i }}_{{ $k }}", +{{- end }} + "-abr_pipeline"] + name: live-service-{{ $i }} + env: + - name: NO_PROXY + value: "cdn-service" + - name: no_proxy + value: "cdn-service" volumeMounts: - mountPath: /var/www/archive name: archive @@ -29,3 +56,7 @@ spec: persistentVolumeClaim: claimName: video-archive restartPolicy: Always + +--- +{{- end }} +{{- end }} diff --git a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 index c3877ad..5318992 100644 --- a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 +++ b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 @@ -7,6 +7,27 @@ kafka: liveTranscode: replicas: defn(`NLIVES') + streams: + - name: "bbb_sunflower_1080p_30fps_normal.mp4" + transcode: + - protocol: dash + scale: "856:480" + bitrate: "8000000" + framerate: 25 + gop: 100 + maxbframes: 2 + refsNum: 2 + preset: veryfast + encoderType: libx264 + - protocol: hls + scale: "856:480" + bitrate: "8000000" + framerate: 25 + gop: 100 + maxbframes: 2 + refsNum: 2 + preset: veryfast + encoderType: libsvt_hevc vodTranscode: replicas: defn(`NVODS') From 51176d2b0a0158bdf3d39c0c0b4359a5670f6373 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Sat, 16 May 2020 16:59:34 -0700 Subject: [PATCH 11/91] change stream location --- deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 index 5318992..b44fd20 100644 --- a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 +++ b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 @@ -8,7 +8,7 @@ kafka: liveTranscode: replicas: defn(`NLIVES') streams: - - name: "bbb_sunflower_1080p_30fps_normal.mp4" + - name: "/var/www/archive/bbb_sunflower_1080p_30fps_normal.mp4" transcode: - protocol: dash scale: "856:480" From 
4d085b94a07e994a07f45c195dcde8209306dec0 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Sat, 16 May 2020 17:03:58 -0700 Subject: [PATCH 12/91] change default values --- deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 index b44fd20..5416656 100644 --- a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 +++ b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 @@ -19,6 +19,8 @@ liveTranscode: refsNum: 2 preset: veryfast encoderType: libx264 + - name: "/var/www/archive/bbb_sunflower_1080p_30fps_normal.mp4" + transcode: - protocol: hls scale: "856:480" bitrate: "8000000" From 611baedb05c937a4070cb3639543fe7ff2e21fc2 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Sat, 16 May 2020 17:14:01 -0700 Subject: [PATCH 13/91] fix svt preset --- deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 index 5416656..63de42d 100644 --- a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 +++ b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 @@ -28,7 +28,7 @@ liveTranscode: gop: 100 maxbframes: 2 refsNum: 2 - preset: veryfast + preset: 9 encoderType: libsvt_hevc vodTranscode: From aec5ca53263c5da39df11afcf3bf7248d645467d Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Sat, 16 May 2020 18:10:50 -0700 Subject: [PATCH 14/91] add guard if helm is not available --- deployment/kubernetes/helm/build.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/deployment/kubernetes/helm/build.sh b/deployment/kubernetes/helm/build.sh index ec3881a..ece48e2 100755 --- a/deployment/kubernetes/helm/build.sh 
+++ b/deployment/kubernetes/helm/build.sh @@ -5,6 +5,10 @@ NVODS="${1:-1}" NLIVES="${2:-1}" HOSTIP=$(ip route get 8.8.8.8 | awk '/ src /{split(substr($0,index($0," src ")),f);print f[2];exit}') +if [ ! -x /usr/bin/helm ] && [ ! -x /usr/local/bin/helm ]; then + exit 0 +fi + echo "Generating persistent volume yaml(s)" # list all workers hosts=($(kubectl get node -l vcac-zone!=yes -o custom-columns=NAME:metadata.name,STATUS:status.conditions[-1].type,TAINT:spec.taints | grep " Ready " | grep -v "NoSchedule" | cut -f1 -d' ')) From 869db894570e0e2a8d389b2c8e8adc063a5e3cec Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Sun, 17 May 2020 18:35:07 -0700 Subject: [PATCH 15/91] support pushing images to private registry after each build --- CMakeLists.txt | 6 ++++++ README.md | 7 +++++++ deployment/docker-swarm/.gitignore | 1 + deployment/docker-swarm/build.sh | 7 +++++++ .../{docker-compose.yml => docker-compose.yml.m4} | 8 ++++---- deployment/kubernetes/build.sh | 3 ++- deployment/kubernetes/cdn-service-deployment.yaml.m4 | 2 +- deployment/kubernetes/helm/build.sh | 3 ++- deployment/kubernetes/helm/cdn-transcode/README.md | 6 ++++++ .../cdn-transcode/templates/cdn-service-deployment.yaml | 2 +- .../cdn-transcode/templates/live-service-deployment.yaml | 2 +- .../cdn-transcode/templates/vod-service-deployment.yaml | 2 +- deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 | 2 ++ deployment/kubernetes/live-service-deployment.yaml.m4 | 2 +- deployment/kubernetes/vod-service-deployment.yaml.m4 | 2 +- script/build.sh | 8 ++++++++ script/deployment.cmake | 2 +- script/service.cmake | 2 +- 18 files changed, 53 insertions(+), 14 deletions(-) create mode 100644 deployment/docker-swarm/.gitignore create mode 100755 deployment/docker-swarm/build.sh rename deployment/docker-swarm/{docker-compose.yml => docker-compose.yml.m4} (93%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 48821d0..b632e78 100644 --- a/CMakeLists.txt +++ 
b/CMakeLists.txt @@ -2,6 +2,11 @@ cmake_minimum_required (VERSION 2.8) Project(OVC NONE) +if (NOT DEFINED REGISTRY) + set(REGISTRY "") +elseif (NOT ${REGISTRY} MATCHES "/$") + set(REGISTRY "${REGISTRY}/") +endif() if (NOT DEFINED NVODS) + set(NVODS "2") endif() @@ -20,3 +25,4 @@ endforeach() # legal message execute_process(COMMAND printf "\nThis script will build third party components licensed under various open source licenses into your container images. The terms under which those components may be used and distributed can be found with the license document that is provided with those components. Please familiarize yourself with those terms to ensure your distribution of those components complies with the terms of those licenses.\n\n") execute_process(COMMAND printf "\n-- Setting: NVODS=${NVODS}, NLIVES=${NLIVES}\n") +execute_process(COMMAND printf "-- Setting: REGISTRY=${REGISTRY}\n") diff --git a/README.md b/README.md index f989ff2..907feb8 100644 --- a/README.md +++ b/README.md @@ -67,6 +67,7 @@ sudo systemctl daemon-reload sudo systemctl restart docker ``` ## Build(Both of master and slave nodes) + Run below commands to build docker images ``` cd CDN-Transcode-Sample @@ -76,6 +77,12 @@ cmake .. make ``` +--- + +If you deploy the sample to a cluster, configure the sample with `cmake -DREGISTRY=<registry-url> ..` so that the sample images are pushed to the private docker registry after each build. 
+ +--- + + ## Deploy ### Auto deployment using Kubernetes diff --git a/deployment/docker-swarm/.gitignore b/deployment/docker-swarm/.gitignore new file mode 100644 index 0000000..1120be9 --- /dev/null +++ b/deployment/docker-swarm/.gitignore @@ -0,0 +1 @@ +docker-compose.yml diff --git a/deployment/docker-swarm/build.sh b/deployment/docker-swarm/build.sh new file mode 100755 index 0000000..ce18ab4 --- /dev/null +++ b/deployment/docker-swarm/build.sh @@ -0,0 +1,7 @@ +#!/bin/bash -e + +DIR=$(dirname $(readlink -f "$0")) +NVODS="${1:-1}" +REGISTRY="$3" + +m4 -DNVODS=${NVODS} -DREGISTRY_PREFIX=${REGISTRY} -I "${DIR}" "${DIR}/docker-compose.yml.m4" > "${DIR}/docker-compose.yml" diff --git a/deployment/docker-swarm/docker-compose.yml b/deployment/docker-swarm/docker-compose.yml.m4 similarity index 93% rename from deployment/docker-swarm/docker-compose.yml rename to deployment/docker-swarm/docker-compose.yml.m4 index 154f9a5..a45a748 100644 --- a/deployment/docker-swarm/docker-compose.yml +++ b/deployment/docker-swarm/docker-compose.yml.m4 @@ -52,7 +52,7 @@ services: replicas: 1 cdn-service: - image: ovc_cdn_service:latest + image: defn(`REGISTRY_PREFIX')ovc_cdn_service:latest ports: - "443:8080" volumes: @@ -78,19 +78,19 @@ services: mode: 0440 vod-transcode-service: - image: ovc_software_transcode_service:latest + image: defn(`REGISTRY_PREFIX')ovc_software_transcode_service:latest volumes: - ${VIDEO_ARCHIVE_VOLUME}:/var/www/archive:ro - ${VIDEO_DASH_VOLUME}:/var/www/dash:rw - ${VIDEO_HLS_VOLUME}:/var/www/hls:rw deploy: - replicas: 2 + replicas: defn(`NVODS') depends_on: - kafka-service - zookeeper-service live-transcode-service: - image: ovc_software_transcode_service:latest + image: defn(`REGISTRY_PREFIX')ovc_software_transcode_service:latest volumes: - ${VIDEO_ARCHIVE_VOLUME}:/var/www/archive:ro depends_on: diff --git a/deployment/kubernetes/build.sh b/deployment/kubernetes/build.sh index 7196064..8a5eb80 100755 --- a/deployment/kubernetes/build.sh +++ 
b/deployment/kubernetes/build.sh @@ -3,6 +3,7 @@ DIR=$(dirname $(readlink -f "$0")) NVODS="${1:-1}" NLIVES="${2:-1}" +REGISTRY="$3" echo "Generating templates with NVODS=${NVODS}, NLIVES=${NLIVES}" @@ -11,7 +12,7 @@ find "${DIR}" -maxdepth 1 -name "*.cfg" -exec rm -rf "{}" \; for template in $(find "${DIR}" -maxdepth 1 -name "*.yaml.m4" -print); do yaml=${template/.m4/} - m4 -DNVODS=${NVODS} -I "${DIR}" "${template}" > "${yaml}" + m4 -DNVODS=${NVODS} -DREGISTRY_PREFIX=${REGISTRY} -I "${DIR}" "${template}" > "${yaml}" done cat <> ${DIR}/cpu_mem_managerment.cfg diff --git a/deployment/kubernetes/cdn-service-deployment.yaml.m4 b/deployment/kubernetes/cdn-service-deployment.yaml.m4 index d5124ed..e7a076b 100644 --- a/deployment/kubernetes/cdn-service-deployment.yaml.m4 +++ b/deployment/kubernetes/cdn-service-deployment.yaml.m4 @@ -20,7 +20,7 @@ spec: - bash - -c - /home/main.py&/usr/local/sbin/nginx - image: ovc_cdn_service:latest + image: defn(`REGISTRY_PREFIX')ovc_cdn_service:latest imagePullPolicy: IfNotPresent name: cdn-service ports: diff --git a/deployment/kubernetes/helm/build.sh b/deployment/kubernetes/helm/build.sh index ece48e2..cc4a2e9 100755 --- a/deployment/kubernetes/helm/build.sh +++ b/deployment/kubernetes/helm/build.sh @@ -3,6 +3,7 @@ DIR=$(dirname $(readlink -f "$0")) NVODS="${1:-1}" NLIVES="${2:-1}" +REGISTRY="$3" HOSTIP=$(ip route get 8.8.8.8 | awk '/ src /{split(substr($0,index($0," src ")),f);print f[2];exit}') if [ ! -x /usr/bin/helm ] && [ ! 
-x /usr/local/bin/helm ]; then @@ -40,5 +41,5 @@ for pv in "$DIR"/*-pv.yaml.m4; do done echo "Generating helm chart" -m4 -DNVODS=${NVODS} -DNLIVES=${NLIVES} -DUSERID=$(id -u) -DGROUPID=$(id -g) -DHOSTIP=${HOSTIP} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}/cdn-transcode" "$DIR/cdn-transcode/values.yaml.m4" > "$DIR/cdn-transcode/values.yaml" +m4 -DREGISTRY_PREFIX=${REGISTRY} -DNVODS=${NVODS} -DNLIVES=${NLIVES} -DUSERID=$(id -u) -DGROUPID=$(id -g) -DHOSTIP=${HOSTIP} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}/cdn-transcode" "$DIR/cdn-transcode/values.yaml.m4" > "$DIR/cdn-transcode/values.yaml" diff --git a/deployment/kubernetes/helm/cdn-transcode/README.md b/deployment/kubernetes/helm/cdn-transcode/README.md index c79dc99..cb75d74 100644 --- a/deployment/kubernetes/helm/cdn-transcode/README.md +++ b/deployment/kubernetes/helm/cdn-transcode/README.md @@ -14,6 +14,12 @@ cmake .. make ``` +--- + +If you deploy the sample to a cluster, configure the sample with `cmake -DREGISTRY=<registry-url> ..` so that the sample images are pushed to the private docker registry after each build. 
+ +--- + ### Create Shared Volumes: ```bash diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml index edef38f..2ee6c43 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml @@ -20,7 +20,7 @@ spec: - bash - -c - /home/main.py&/usr/local/sbin/nginx - image: ovc_cdn_service:latest + image: {{ $.Values.registryPrefix }}ovc_cdn_service:latest imagePullPolicy: IfNotPresent name: cdn-service ports: diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml index 59e13f8..1b899f1 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml @@ -20,7 +20,7 @@ spec: app: live-service-{{ $i }} spec: containers: - - image: ovc_software_transcode_service:latest + - image: {{ $.Values.registryPrefix }}ovc_software_transcode_service:latest imagePullPolicy: IfNotPresent command: ["/usr/local/bin/ffmpeg","-re","-stream_loop","-1", "-i","{{ .name }}", diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml index db03351..b2fd110 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml @@ -20,7 +20,7 @@ spec: - bash - -c - /home/main.py - image: ovc_software_transcode_service:latest + image: {{ $.Values.registryPrefix }}ovc_software_transcode_service:latest imagePullPolicy: IfNotPresent name: vod-service volumeMounts: diff --git 
a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 index 63de42d..aa5dea0 100644 --- a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 +++ b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 @@ -1,4 +1,6 @@ +registryPrefix: "" + zookeeper: heapSize: 1024m diff --git a/deployment/kubernetes/live-service-deployment.yaml.m4 b/deployment/kubernetes/live-service-deployment.yaml.m4 index f5a80ea..82739a4 100644 --- a/deployment/kubernetes/live-service-deployment.yaml.m4 +++ b/deployment/kubernetes/live-service-deployment.yaml.m4 @@ -16,7 +16,7 @@ spec: app: live-service spec: containers: - - image: ovc_software_transcode_service:latest + - image: defn(`REGISTRY_PREFIX')ovc_software_transcode_service:latest imagePullPolicy: IfNotPresent name: live-service resources: diff --git a/deployment/kubernetes/vod-service-deployment.yaml.m4 b/deployment/kubernetes/vod-service-deployment.yaml.m4 index 826f674..8a416fa 100644 --- a/deployment/kubernetes/vod-service-deployment.yaml.m4 +++ b/deployment/kubernetes/vod-service-deployment.yaml.m4 @@ -20,7 +20,7 @@ spec: - bash - -c - /home/main.py - image: ovc_software_transcode_service:latest + image: defn(`REGISTRY_PREFIX')ovc_software_transcode_service:latest imagePullPolicy: IfNotPresent name: vod-service resources: diff --git a/script/build.sh b/script/build.sh index abf7d97..c27f2a7 100644 --- a/script/build.sh +++ b/script/build.sh @@ -5,6 +5,8 @@ if test -z "${DIR}"; then exit -1 fi +REGISTRY="$3" + # build image(s) in order (to satisfy dependencies) for dep in .8 .7 .6 .5 .4 .3 .2 .1 ''; do if test -f "${DIR}/Dockerfile$dep"; then @@ -16,5 +18,11 @@ for dep in .8 .7 .6 .5 .4 .3 .2 .1 ''; do fi docker build --network=host --file="${DIR}/Dockerfile$dep" -t "$image:latest" "$DIR" $(env | grep -E '_(proxy|REPO|VER)=' | sed 's/^/--build-arg /') --build-arg UID=$(id -u) --build-arg GID=$(id -g) + + # if REGISTRY is specified, push image to the private 
registry + if [ -n "$REGISTRY" ]; then + docker tag "$image:latest" "$REGISTRY$image:latest" + docker push "$REGISTRY$image:latest" + fi fi done diff --git a/script/deployment.cmake b/script/deployment.cmake index 0283519..38e4234 100644 --- a/script/deployment.cmake +++ b/script/deployment.cmake @@ -1,2 +1,2 @@ -add_custom_target(start_${service} "${CMAKE_CURRENT_SOURCE_DIR}/start.sh" "${service}" "${NVODS}" "${NLIVES}") +add_custom_target(start_${service} "${CMAKE_CURRENT_SOURCE_DIR}/start.sh" "${service}" "${NVODS}" "${NLIVES}" "${REGISTRY}") add_custom_target(stop_${service} "${CMAKE_CURRENT_SOURCE_DIR}/stop.sh" "${service}") diff --git a/script/service.cmake b/script/service.cmake index bdc02c2..4f85122 100644 --- a/script/service.cmake +++ b/script/service.cmake @@ -1,3 +1,3 @@ if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/build.sh") - add_custom_target(build_${service} ALL "${CMAKE_CURRENT_SOURCE_DIR}/build.sh" "${NVODS}" "${NLIVES}") + add_custom_target(build_${service} ALL "${CMAKE_CURRENT_SOURCE_DIR}/build.sh" "${NVODS}" "${NLIVES}" "${REGISTRY}") endif() From c285d1175b4548f1f9e00f4c1506e9037c24a84b Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Sun, 17 May 2020 18:59:28 -0700 Subject: [PATCH 16/91] add registry prefix to the signing image --- deployment/certificate/self-sign.sh | 3 ++- deployment/docker-swarm/start.sh | 3 ++- deployment/kubernetes/helm/start.sh | 3 ++- deployment/kubernetes/start.sh | 4 +++- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/deployment/certificate/self-sign.sh b/deployment/certificate/self-sign.sh index 29dc664..c46a279 100755 --- a/deployment/certificate/self-sign.sh +++ b/deployment/certificate/self-sign.sh @@ -1,6 +1,7 @@ #!/bin/bash -e -IMAGE="ovc_self_certificate" +REGISTRY="$1" +IMAGE="${REGISTRY}ovc_self_certificate" DIR=$(dirname $(readlink -f "$0")) case "$(cat /proc/1/sched | head -n 1)" in diff --git a/deployment/docker-swarm/start.sh 
b/deployment/docker-swarm/start.sh index a0a7335..3864dcf 100755 --- a/deployment/docker-swarm/start.sh +++ b/deployment/docker-swarm/start.sh @@ -17,6 +17,7 @@ sudo mkdir -p "${VIDEO_DASH_VOLUME}" "${VIDEO_HLS_VOLUME}" "${NGINX_LOG_VOLUME}" yml="$DIR/docker-compose.$(hostname).yml" test -f "$yml" || yml="$DIR/docker-compose.yml" +REGISTRY="$4" case "$1" in docker_compose) dcv="$(docker-compose --version | cut -f3 -d' ' | cut -f1 -d',')" @@ -35,7 +36,7 @@ docker_compose) *) export USER_ID=$(id -u) export GROUP_ID=$(id -g) - "$DIR/../certificate/self-sign.sh" + "$DIR/../certificate/self-sign.sh" "$REGISTRY" docker stack deploy -c "$yml" ovc ;; esac diff --git a/deployment/kubernetes/helm/start.sh b/deployment/kubernetes/helm/start.sh index 61d931b..bca0e59 100755 --- a/deployment/kubernetes/helm/start.sh +++ b/deployment/kubernetes/helm/start.sh @@ -7,7 +7,8 @@ function create_secret { } # create secrets -"$DIR/../../certificate/self-sign.sh" +REGISTRY="$4" +"$DIR/../../certificate/self-sign.sh" "$REGISTRY" create_secret 2>/dev/null || (kubectl delete secret self-signed-certificate; create_secret) for yaml in $(find "$DIR" -maxdepth 1 -name "*-pv.yaml" -print); do diff --git a/deployment/kubernetes/start.sh b/deployment/kubernetes/start.sh index 90af793..b3160b7 100755 --- a/deployment/kubernetes/start.sh +++ b/deployment/kubernetes/start.sh @@ -67,12 +67,14 @@ sudo mkdir -p "${NGINX_LOG_VOLUME}" NVODS="${2:-1}" NLIVES="${3:-1}" +REGISTRY="$4" + echo "Generating yamls with NVODS=${NVODS}, NLIVES=${NLIVES}" NODES="$(kubectl get node | awk '{print $1}' | sed -n '2, $p')" DESCRIPTIONS="$(kubectl get node --no-headers -o custom-columns=NAME:metadata.name,CPU:status.capacity.cpu,MEM:status.capacity.memory)" "$DIR/run_with_command.py" "$DIR" ${NVODS} ${NLIVES} "$NODES" "$DESCRIPTIONS" -"$DIR/../certificate/self-sign.sh" +"$DIR/../certificate/self-sign.sh" "$REGISTRY" create_secret 2>/dev/null || (kubectl delete secret self-signed-certificate; create_secret) for i in 
$(find "$DIR" -maxdepth 1 -name "*.yaml"); do From aa7164125d122d538e2ce8edbd2f26ace1819163 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Sun, 17 May 2020 19:00:40 -0700 Subject: [PATCH 17/91] Revert "add registry prefix to the signing image" This reverts commit 5b738d984ed3453665541b67c33969046e6afbe6. --- deployment/certificate/self-sign.sh | 3 +-- deployment/docker-swarm/start.sh | 3 +-- deployment/kubernetes/helm/start.sh | 3 +-- deployment/kubernetes/start.sh | 4 +--- 4 files changed, 4 insertions(+), 9 deletions(-) diff --git a/deployment/certificate/self-sign.sh b/deployment/certificate/self-sign.sh index c46a279..29dc664 100755 --- a/deployment/certificate/self-sign.sh +++ b/deployment/certificate/self-sign.sh @@ -1,7 +1,6 @@ #!/bin/bash -e -REGISTRY="$1" -IMAGE="${REGISTRY}ovc_self_certificate" +IMAGE="ovc_self_certificate" DIR=$(dirname $(readlink -f "$0")) case "$(cat /proc/1/sched | head -n 1)" in diff --git a/deployment/docker-swarm/start.sh b/deployment/docker-swarm/start.sh index 3864dcf..a0a7335 100755 --- a/deployment/docker-swarm/start.sh +++ b/deployment/docker-swarm/start.sh @@ -17,7 +17,6 @@ sudo mkdir -p "${VIDEO_DASH_VOLUME}" "${VIDEO_HLS_VOLUME}" "${NGINX_LOG_VOLUME}" yml="$DIR/docker-compose.$(hostname).yml" test -f "$yml" || yml="$DIR/docker-compose.yml" -REGISTRY="$4" case "$1" in docker_compose) dcv="$(docker-compose --version | cut -f3 -d' ' | cut -f1 -d',')" @@ -36,7 +35,7 @@ docker_compose) *) export USER_ID=$(id -u) export GROUP_ID=$(id -g) - "$DIR/../certificate/self-sign.sh" "$REGISTRY" + "$DIR/../certificate/self-sign.sh" docker stack deploy -c "$yml" ovc ;; esac diff --git a/deployment/kubernetes/helm/start.sh b/deployment/kubernetes/helm/start.sh index bca0e59..61d931b 100755 --- a/deployment/kubernetes/helm/start.sh +++ b/deployment/kubernetes/helm/start.sh @@ -7,8 +7,7 @@ function create_secret { } # create secrets -REGISTRY="$4" -"$DIR/../../certificate/self-sign.sh" "$REGISTRY" 
+"$DIR/../../certificate/self-sign.sh" create_secret 2>/dev/null || (kubectl delete secret self-signed-certificate; create_secret) for yaml in $(find "$DIR" -maxdepth 1 -name "*-pv.yaml" -print); do diff --git a/deployment/kubernetes/start.sh b/deployment/kubernetes/start.sh index b3160b7..90af793 100755 --- a/deployment/kubernetes/start.sh +++ b/deployment/kubernetes/start.sh @@ -67,14 +67,12 @@ sudo mkdir -p "${NGINX_LOG_VOLUME}" NVODS="${2:-1}" NLIVES="${3:-1}" -REGISTRY="$4" - echo "Generating yamls with NVODS=${NVODS}, NLIVES=${NLIVES}" NODES="$(kubectl get node | awk '{print $1}' | sed -n '2, $p')" DESCRIPTIONS="$(kubectl get node --no-headers -o custom-columns=NAME:metadata.name,CPU:status.capacity.cpu,MEM:status.capacity.memory)" "$DIR/run_with_command.py" "$DIR" ${NVODS} ${NLIVES} "$NODES" "$DESCRIPTIONS" -"$DIR/../certificate/self-sign.sh" "$REGISTRY" +"$DIR/../certificate/self-sign.sh" create_secret 2>/dev/null || (kubectl delete secret self-signed-certificate; create_secret) for i in $(find "$DIR" -maxdepth 1 -name "*.yaml"); do From 994bbbe7c16f0ee7f45e493e57c393661f5f1b2d Mon Sep 17 00:00:00 2001 From: Zhizhen Tang Date: Mon, 25 May 2020 17:56:23 +0800 Subject: [PATCH 18/91] Enable PV and PVC Signed-off-by: Zhizhen Tang --- cdn-server/Dockerfile | 1 + {volume => cdn-server}/html/css/app.css | 0 .../html/css/foundation.css | 0 .../html/css/foundation.min.css | 0 {volume => cdn-server}/html/favicon.ico | Bin {volume => cdn-server}/html/header.shtml | 0 .../html/icon/foundation-icons.css | 0 .../html/icon/foundation-icons.eot | Bin .../html/icon/foundation-icons.svg | 0 .../html/icon/foundation-icons.ttf | Bin .../html/icon/foundation-icons.woff | Bin .../html/image/CDN-Transcode-Sample-Arch.png | Bin .../html/image/CDN-Transcode-Sample-Arch.vsdx | Bin .../grafana/Kubernetes-Monitoring-Arch.png | Bin .../html/image/grafana/alertmanager.png | Bin .../html/image/grafana/dashboards_list.png | Bin .../html/image/grafana/home.png | Bin 
.../html/image/grafana/login.png | Bin .../html/image/grafana/navigation.png | Bin .../html/image/grafana/nodes_dashboard.png | Bin .../html/image/grafana/promQL.png | Bin .../html/image/grafana/prometheus.png | Bin .../image/kibana/Kubernetes-Logging-Arch.png | Bin .../image/kibana/Kubernetes-Logging-Arch.vsdx | Bin .../html/image/kibana/choose_source.png | Bin .../html/image/kibana/ffmpeg_log.png | Bin .../html/image/kibana/filter_log.png | Bin .../html/image/kibana/generate_report.png | Bin .../html/image/kibana/logs.png | Bin .../html/image/kibana/navigation.png | Bin .../html/image/kibana/save_log.png | Bin .../html/image/kibana/step_1.png | Bin .../html/image/kibana/step_2.png | Bin .../html/image/kibana/visualization_show.png | Bin .../html/image/kibana/visualization_type.png | Bin {volume => cdn-server}/html/index.html | 0 {volume => cdn-server}/html/js/app-api.js | 0 {volume => cdn-server}/html/js/app-header.js | 0 {volume => cdn-server}/html/js/app-player.js | 0 {volume => cdn-server}/html/js/app.js | 0 .../html/js/vendor/dash.all.min.js | 0 .../html/js/vendor/foundation.min.js | 0 .../html/js/vendor/hls.min.js | 0 .../html/js/vendor/jquery-3.2.1.min.js | 0 .../html/js/vendor/what-input.js | 0 {volume => cdn-server}/html/player.shtml | 0 cdn-server/nginx.conf | 14 +- deployment/kubernetes/CMakeLists.txt | 10 +- deployment/kubernetes/build.sh | 132 +++-- deployment/kubernetes/cdn-deploy.yaml.m4 | 51 ++ .../kubernetes/cdn-service-deployment.yaml.m4 | 38 -- .../kubernetes/cdn-service-service.yaml.m4 | 17 - deployment/kubernetes/cdn-svc.yaml.m4 | 18 + .../kubernetes/cpu_mem_management.cfg.m4 | 20 + .../kubernetes/docker-compose-template.yml | 153 ------ deployment/kubernetes/frame_config.py | 495 ------------------ deployment/kubernetes/frame_index.py | 143 ----- deployment/kubernetes/kafka-deploy.yaml.m4 | 61 +++ .../kafka-service-deployment.yaml.m4 | 64 --- .../kubernetes/kafka-service-service.yaml.m4 | 14 - deployment/kubernetes/kafka-svc.yaml.m4 | 12 + 
deployment/kubernetes/live-deploy.yaml.m4 | 48 ++ .../live-service-deployment.yaml.m4 | 31 -- deployment/kubernetes/mkvolume.sh | 32 ++ deployment/kubernetes/platform.m4 | 16 + deployment/kubernetes/redis-deploy.yaml.m4 | 33 ++ .../redis-service-deployment.yaml.m4 | 34 -- .../kubernetes/redis-service-service.yaml.m4 | 13 - deployment/kubernetes/redis-svc.yaml.m4 | 12 + deployment/kubernetes/run_with_GUI.py | 10 - deployment/kubernetes/run_with_command.py | 245 ++------- deployment/kubernetes/start.sh | 32 +- deployment/kubernetes/start_GUI.sh | 91 ---- deployment/kubernetes/stop.sh | 32 +- deployment/kubernetes/update_yaml.py | 62 +-- .../kubernetes/video-archive-pv.yaml.m4 | 32 ++ .../kubernetes/video-archive-pvc.yaml.m4 | 12 + deployment/kubernetes/video-cache-pv.yaml.m4 | 32 ++ deployment/kubernetes/video-cache-pvc.yaml.m4 | 12 + deployment/kubernetes/vod-deploy.yaml.m4 | 48 ++ .../kubernetes/vod-service-deployment.yaml.m4 | 35 -- deployment/kubernetes/yaml_utils.py | 106 +--- .../kubernetes/zookeeper-deploy.yaml.m4 | 41 ++ .../zookeeper-service-deployment.yaml.m4 | 47 -- .../zookeeper-service-service.yaml.m4 | 14 - deployment/kubernetes/zookeeper-svc.yaml.m4 | 12 + script/install_dependency.sh | 11 +- xcode-server/hardware/main.py | 4 +- xcode-server/software/main.py | 4 +- 89 files changed, 716 insertions(+), 1628 deletions(-) rename {volume => cdn-server}/html/css/app.css (100%) rename {volume => cdn-server}/html/css/foundation.css (100%) rename {volume => cdn-server}/html/css/foundation.min.css (100%) rename {volume => cdn-server}/html/favicon.ico (100%) rename {volume => cdn-server}/html/header.shtml (100%) rename {volume => cdn-server}/html/icon/foundation-icons.css (100%) rename {volume => cdn-server}/html/icon/foundation-icons.eot (100%) rename {volume => cdn-server}/html/icon/foundation-icons.svg (100%) rename {volume => cdn-server}/html/icon/foundation-icons.ttf (100%) rename {volume => cdn-server}/html/icon/foundation-icons.woff (100%) rename {volume 
=> cdn-server}/html/image/CDN-Transcode-Sample-Arch.png (100%) rename {volume => cdn-server}/html/image/CDN-Transcode-Sample-Arch.vsdx (100%) rename {volume => cdn-server}/html/image/grafana/Kubernetes-Monitoring-Arch.png (100%) rename {volume => cdn-server}/html/image/grafana/alertmanager.png (100%) rename {volume => cdn-server}/html/image/grafana/dashboards_list.png (100%) rename {volume => cdn-server}/html/image/grafana/home.png (100%) rename {volume => cdn-server}/html/image/grafana/login.png (100%) rename {volume => cdn-server}/html/image/grafana/navigation.png (100%) rename {volume => cdn-server}/html/image/grafana/nodes_dashboard.png (100%) rename {volume => cdn-server}/html/image/grafana/promQL.png (100%) rename {volume => cdn-server}/html/image/grafana/prometheus.png (100%) rename {volume => cdn-server}/html/image/kibana/Kubernetes-Logging-Arch.png (100%) rename {volume => cdn-server}/html/image/kibana/Kubernetes-Logging-Arch.vsdx (100%) rename {volume => cdn-server}/html/image/kibana/choose_source.png (100%) rename {volume => cdn-server}/html/image/kibana/ffmpeg_log.png (100%) rename {volume => cdn-server}/html/image/kibana/filter_log.png (100%) rename {volume => cdn-server}/html/image/kibana/generate_report.png (100%) rename {volume => cdn-server}/html/image/kibana/logs.png (100%) rename {volume => cdn-server}/html/image/kibana/navigation.png (100%) rename {volume => cdn-server}/html/image/kibana/save_log.png (100%) rename {volume => cdn-server}/html/image/kibana/step_1.png (100%) rename {volume => cdn-server}/html/image/kibana/step_2.png (100%) rename {volume => cdn-server}/html/image/kibana/visualization_show.png (100%) rename {volume => cdn-server}/html/image/kibana/visualization_type.png (100%) rename {volume => cdn-server}/html/index.html (100%) rename {volume => cdn-server}/html/js/app-api.js (100%) rename {volume => cdn-server}/html/js/app-header.js (100%) rename {volume => cdn-server}/html/js/app-player.js (100%) rename {volume => 
cdn-server}/html/js/app.js (100%) rename {volume => cdn-server}/html/js/vendor/dash.all.min.js (100%) rename {volume => cdn-server}/html/js/vendor/foundation.min.js (100%) rename {volume => cdn-server}/html/js/vendor/hls.min.js (100%) rename {volume => cdn-server}/html/js/vendor/jquery-3.2.1.min.js (100%) rename {volume => cdn-server}/html/js/vendor/what-input.js (100%) rename {volume => cdn-server}/html/player.shtml (100%) create mode 100644 deployment/kubernetes/cdn-deploy.yaml.m4 delete mode 100644 deployment/kubernetes/cdn-service-deployment.yaml.m4 delete mode 100644 deployment/kubernetes/cdn-service-service.yaml.m4 create mode 100644 deployment/kubernetes/cdn-svc.yaml.m4 create mode 100644 deployment/kubernetes/cpu_mem_management.cfg.m4 delete mode 100644 deployment/kubernetes/docker-compose-template.yml delete mode 100755 deployment/kubernetes/frame_config.py delete mode 100755 deployment/kubernetes/frame_index.py create mode 100644 deployment/kubernetes/kafka-deploy.yaml.m4 delete mode 100644 deployment/kubernetes/kafka-service-deployment.yaml.m4 delete mode 100644 deployment/kubernetes/kafka-service-service.yaml.m4 create mode 100644 deployment/kubernetes/kafka-svc.yaml.m4 create mode 100644 deployment/kubernetes/live-deploy.yaml.m4 delete mode 100644 deployment/kubernetes/live-service-deployment.yaml.m4 create mode 100755 deployment/kubernetes/mkvolume.sh create mode 100644 deployment/kubernetes/platform.m4 create mode 100644 deployment/kubernetes/redis-deploy.yaml.m4 delete mode 100644 deployment/kubernetes/redis-service-deployment.yaml.m4 delete mode 100644 deployment/kubernetes/redis-service-service.yaml.m4 create mode 100644 deployment/kubernetes/redis-svc.yaml.m4 delete mode 100755 deployment/kubernetes/run_with_GUI.py delete mode 100755 deployment/kubernetes/start_GUI.sh create mode 100644 deployment/kubernetes/video-archive-pv.yaml.m4 create mode 100644 deployment/kubernetes/video-archive-pvc.yaml.m4 create mode 100644 
deployment/kubernetes/video-cache-pv.yaml.m4 create mode 100644 deployment/kubernetes/video-cache-pvc.yaml.m4 create mode 100644 deployment/kubernetes/vod-deploy.yaml.m4 delete mode 100644 deployment/kubernetes/vod-service-deployment.yaml.m4 create mode 100644 deployment/kubernetes/zookeeper-deploy.yaml.m4 delete mode 100644 deployment/kubernetes/zookeeper-service-deployment.yaml.m4 delete mode 100644 deployment/kubernetes/zookeeper-service-service.yaml.m4 create mode 100644 deployment/kubernetes/zookeeper-svc.yaml.m4 diff --git a/cdn-server/Dockerfile b/cdn-server/Dockerfile index 1470cc3..8ecc822 100644 --- a/cdn-server/Dockerfile +++ b/cdn-server/Dockerfile @@ -5,6 +5,7 @@ Run DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no- COPY *.xsl /etc/nginx/ COPY *.conf /etc/nginx/ +COPY html /var/www/html COPY *.py /home/ CMD ["/bin/bash","-c","/home/main.py&/usr/local/sbin/nginx"] WORKDIR /home diff --git a/volume/html/css/app.css b/cdn-server/html/css/app.css similarity index 100% rename from volume/html/css/app.css rename to cdn-server/html/css/app.css diff --git a/volume/html/css/foundation.css b/cdn-server/html/css/foundation.css similarity index 100% rename from volume/html/css/foundation.css rename to cdn-server/html/css/foundation.css diff --git a/volume/html/css/foundation.min.css b/cdn-server/html/css/foundation.min.css similarity index 100% rename from volume/html/css/foundation.min.css rename to cdn-server/html/css/foundation.min.css diff --git a/volume/html/favicon.ico b/cdn-server/html/favicon.ico similarity index 100% rename from volume/html/favicon.ico rename to cdn-server/html/favicon.ico diff --git a/volume/html/header.shtml b/cdn-server/html/header.shtml similarity index 100% rename from volume/html/header.shtml rename to cdn-server/html/header.shtml diff --git a/volume/html/icon/foundation-icons.css b/cdn-server/html/icon/foundation-icons.css similarity index 100% rename from volume/html/icon/foundation-icons.css rename to 
cdn-server/html/icon/foundation-icons.css diff --git a/volume/html/icon/foundation-icons.eot b/cdn-server/html/icon/foundation-icons.eot similarity index 100% rename from volume/html/icon/foundation-icons.eot rename to cdn-server/html/icon/foundation-icons.eot diff --git a/volume/html/icon/foundation-icons.svg b/cdn-server/html/icon/foundation-icons.svg similarity index 100% rename from volume/html/icon/foundation-icons.svg rename to cdn-server/html/icon/foundation-icons.svg diff --git a/volume/html/icon/foundation-icons.ttf b/cdn-server/html/icon/foundation-icons.ttf similarity index 100% rename from volume/html/icon/foundation-icons.ttf rename to cdn-server/html/icon/foundation-icons.ttf diff --git a/volume/html/icon/foundation-icons.woff b/cdn-server/html/icon/foundation-icons.woff similarity index 100% rename from volume/html/icon/foundation-icons.woff rename to cdn-server/html/icon/foundation-icons.woff diff --git a/volume/html/image/CDN-Transcode-Sample-Arch.png b/cdn-server/html/image/CDN-Transcode-Sample-Arch.png similarity index 100% rename from volume/html/image/CDN-Transcode-Sample-Arch.png rename to cdn-server/html/image/CDN-Transcode-Sample-Arch.png diff --git a/volume/html/image/CDN-Transcode-Sample-Arch.vsdx b/cdn-server/html/image/CDN-Transcode-Sample-Arch.vsdx similarity index 100% rename from volume/html/image/CDN-Transcode-Sample-Arch.vsdx rename to cdn-server/html/image/CDN-Transcode-Sample-Arch.vsdx diff --git a/volume/html/image/grafana/Kubernetes-Monitoring-Arch.png b/cdn-server/html/image/grafana/Kubernetes-Monitoring-Arch.png similarity index 100% rename from volume/html/image/grafana/Kubernetes-Monitoring-Arch.png rename to cdn-server/html/image/grafana/Kubernetes-Monitoring-Arch.png diff --git a/volume/html/image/grafana/alertmanager.png b/cdn-server/html/image/grafana/alertmanager.png similarity index 100% rename from volume/html/image/grafana/alertmanager.png rename to cdn-server/html/image/grafana/alertmanager.png diff --git 
a/volume/html/image/grafana/dashboards_list.png b/cdn-server/html/image/grafana/dashboards_list.png similarity index 100% rename from volume/html/image/grafana/dashboards_list.png rename to cdn-server/html/image/grafana/dashboards_list.png diff --git a/volume/html/image/grafana/home.png b/cdn-server/html/image/grafana/home.png similarity index 100% rename from volume/html/image/grafana/home.png rename to cdn-server/html/image/grafana/home.png diff --git a/volume/html/image/grafana/login.png b/cdn-server/html/image/grafana/login.png similarity index 100% rename from volume/html/image/grafana/login.png rename to cdn-server/html/image/grafana/login.png diff --git a/volume/html/image/grafana/navigation.png b/cdn-server/html/image/grafana/navigation.png similarity index 100% rename from volume/html/image/grafana/navigation.png rename to cdn-server/html/image/grafana/navigation.png diff --git a/volume/html/image/grafana/nodes_dashboard.png b/cdn-server/html/image/grafana/nodes_dashboard.png similarity index 100% rename from volume/html/image/grafana/nodes_dashboard.png rename to cdn-server/html/image/grafana/nodes_dashboard.png diff --git a/volume/html/image/grafana/promQL.png b/cdn-server/html/image/grafana/promQL.png similarity index 100% rename from volume/html/image/grafana/promQL.png rename to cdn-server/html/image/grafana/promQL.png diff --git a/volume/html/image/grafana/prometheus.png b/cdn-server/html/image/grafana/prometheus.png similarity index 100% rename from volume/html/image/grafana/prometheus.png rename to cdn-server/html/image/grafana/prometheus.png diff --git a/volume/html/image/kibana/Kubernetes-Logging-Arch.png b/cdn-server/html/image/kibana/Kubernetes-Logging-Arch.png similarity index 100% rename from volume/html/image/kibana/Kubernetes-Logging-Arch.png rename to cdn-server/html/image/kibana/Kubernetes-Logging-Arch.png diff --git a/volume/html/image/kibana/Kubernetes-Logging-Arch.vsdx b/cdn-server/html/image/kibana/Kubernetes-Logging-Arch.vsdx 
similarity index 100% rename from volume/html/image/kibana/Kubernetes-Logging-Arch.vsdx rename to cdn-server/html/image/kibana/Kubernetes-Logging-Arch.vsdx diff --git a/volume/html/image/kibana/choose_source.png b/cdn-server/html/image/kibana/choose_source.png similarity index 100% rename from volume/html/image/kibana/choose_source.png rename to cdn-server/html/image/kibana/choose_source.png diff --git a/volume/html/image/kibana/ffmpeg_log.png b/cdn-server/html/image/kibana/ffmpeg_log.png similarity index 100% rename from volume/html/image/kibana/ffmpeg_log.png rename to cdn-server/html/image/kibana/ffmpeg_log.png diff --git a/volume/html/image/kibana/filter_log.png b/cdn-server/html/image/kibana/filter_log.png similarity index 100% rename from volume/html/image/kibana/filter_log.png rename to cdn-server/html/image/kibana/filter_log.png diff --git a/volume/html/image/kibana/generate_report.png b/cdn-server/html/image/kibana/generate_report.png similarity index 100% rename from volume/html/image/kibana/generate_report.png rename to cdn-server/html/image/kibana/generate_report.png diff --git a/volume/html/image/kibana/logs.png b/cdn-server/html/image/kibana/logs.png similarity index 100% rename from volume/html/image/kibana/logs.png rename to cdn-server/html/image/kibana/logs.png diff --git a/volume/html/image/kibana/navigation.png b/cdn-server/html/image/kibana/navigation.png similarity index 100% rename from volume/html/image/kibana/navigation.png rename to cdn-server/html/image/kibana/navigation.png diff --git a/volume/html/image/kibana/save_log.png b/cdn-server/html/image/kibana/save_log.png similarity index 100% rename from volume/html/image/kibana/save_log.png rename to cdn-server/html/image/kibana/save_log.png diff --git a/volume/html/image/kibana/step_1.png b/cdn-server/html/image/kibana/step_1.png similarity index 100% rename from volume/html/image/kibana/step_1.png rename to cdn-server/html/image/kibana/step_1.png diff --git 
a/volume/html/image/kibana/step_2.png b/cdn-server/html/image/kibana/step_2.png similarity index 100% rename from volume/html/image/kibana/step_2.png rename to cdn-server/html/image/kibana/step_2.png diff --git a/volume/html/image/kibana/visualization_show.png b/cdn-server/html/image/kibana/visualization_show.png similarity index 100% rename from volume/html/image/kibana/visualization_show.png rename to cdn-server/html/image/kibana/visualization_show.png diff --git a/volume/html/image/kibana/visualization_type.png b/cdn-server/html/image/kibana/visualization_type.png similarity index 100% rename from volume/html/image/kibana/visualization_type.png rename to cdn-server/html/image/kibana/visualization_type.png diff --git a/volume/html/index.html b/cdn-server/html/index.html similarity index 100% rename from volume/html/index.html rename to cdn-server/html/index.html diff --git a/volume/html/js/app-api.js b/cdn-server/html/js/app-api.js similarity index 100% rename from volume/html/js/app-api.js rename to cdn-server/html/js/app-api.js diff --git a/volume/html/js/app-header.js b/cdn-server/html/js/app-header.js similarity index 100% rename from volume/html/js/app-header.js rename to cdn-server/html/js/app-header.js diff --git a/volume/html/js/app-player.js b/cdn-server/html/js/app-player.js similarity index 100% rename from volume/html/js/app-player.js rename to cdn-server/html/js/app-player.js diff --git a/volume/html/js/app.js b/cdn-server/html/js/app.js similarity index 100% rename from volume/html/js/app.js rename to cdn-server/html/js/app.js diff --git a/volume/html/js/vendor/dash.all.min.js b/cdn-server/html/js/vendor/dash.all.min.js similarity index 100% rename from volume/html/js/vendor/dash.all.min.js rename to cdn-server/html/js/vendor/dash.all.min.js diff --git a/volume/html/js/vendor/foundation.min.js b/cdn-server/html/js/vendor/foundation.min.js similarity index 100% rename from volume/html/js/vendor/foundation.min.js rename to 
cdn-server/html/js/vendor/foundation.min.js diff --git a/volume/html/js/vendor/hls.min.js b/cdn-server/html/js/vendor/hls.min.js similarity index 100% rename from volume/html/js/vendor/hls.min.js rename to cdn-server/html/js/vendor/hls.min.js diff --git a/volume/html/js/vendor/jquery-3.2.1.min.js b/cdn-server/html/js/vendor/jquery-3.2.1.min.js similarity index 100% rename from volume/html/js/vendor/jquery-3.2.1.min.js rename to cdn-server/html/js/vendor/jquery-3.2.1.min.js diff --git a/volume/html/js/vendor/what-input.js b/cdn-server/html/js/vendor/what-input.js similarity index 100% rename from volume/html/js/vendor/what-input.js rename to cdn-server/html/js/vendor/what-input.js diff --git a/volume/html/player.shtml b/cdn-server/html/player.shtml similarity index 100% rename from volume/html/player.shtml rename to cdn-server/html/player.shtml diff --git a/cdn-server/nginx.conf b/cdn-server/nginx.conf index cbf1996..64b50a1 100644 --- a/cdn-server/nginx.conf +++ b/cdn-server/nginx.conf @@ -20,7 +20,7 @@ rtmp { application hls { live on; hls on; - hls_path /var/www/hls; + hls_path /var/www/video/hls; hls_nested on; hls_fragment 3; hls_playlist_length 60; @@ -32,7 +32,7 @@ rtmp { application dash { live on; dash on; - dash_path /var/www/dash; + dash_path /var/www/video/dash; dash_fragment 4; dash_playlist_length 120; dash_nested on; @@ -67,7 +67,7 @@ http { limit_req_zone $binary_remote_addr zone=allips:10m rate=200r/s; server { - listen 8080 ssl; + listen 8443 ssl; server_name _; ssl_certificate /var/run/secrets/self.crt; @@ -116,7 +116,7 @@ http { } location /hls/ { - root /var/www; + root /var/www/video; add_header Cache-Control no-cache; add_header 'Access-Control-Allow-Origin' '*' always; add_header 'Access-Control-Expose-Headers' 'Content-Length'; @@ -127,7 +127,7 @@ http { } location /dash/ { - root /var/www; + root /var/www/video; add_header Cache-Control no-cache; add_header 'Access-Control-Allow-Origin' '*' always; add_header 'Access-Control-Expose-Headers' 
'Content-Length'; @@ -137,7 +137,7 @@ http { } location ~* /dash/.*/index.mpd$ { - root /var/www; + root /var/www/video; add_header Cache-Control no-cache; add_header 'Access-Control-Allow-Origin' '*' always; add_header 'Access-Control-Expose-Headers' 'Content-Length'; @@ -148,7 +148,7 @@ http { } location ~* /hls/.*/index.m3u8$ { - root /var/www; + root /var/www/video; add_header Cache-Control no-cache; add_header 'Access-Control-Allow-Origin' '*' always; add_header 'Access-Control-Expose-Headers' 'Content-Length'; diff --git a/deployment/kubernetes/CMakeLists.txt b/deployment/kubernetes/CMakeLists.txt index 74aa1cd..99defd6 100644 --- a/deployment/kubernetes/CMakeLists.txt +++ b/deployment/kubernetes/CMakeLists.txt @@ -1,4 +1,12 @@ set(service "kubernetes") include("${CMAKE_SOURCE_DIR}/script/service.cmake") include("${CMAKE_SOURCE_DIR}/script/deployment.cmake") -include("${CMAKE_SOURCE_DIR}/script/scan-all.cmake") + +# add cleanup files +file(GLOB m4files "${CMAKE_CURRENT_SOURCE_DIR}/*.yaml.m4") +foreach(m4file ${m4files}) + string(REPLACE ".yaml.m4" ".yaml" yamlfile "${m4file}") + set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${yamlfile}") +endforeach(m4file) + +add_custom_target(volume ${CMAKE_CURRENT_SOURCE_DIR}/mkvolume.sh) diff --git a/deployment/kubernetes/build.sh b/deployment/kubernetes/build.sh index 8a5eb80..1828507 100755 --- a/deployment/kubernetes/build.sh +++ b/deployment/kubernetes/build.sh @@ -5,63 +5,103 @@ NVODS="${1:-1}" NLIVES="${2:-1}" REGISTRY="$3" +HOSTIP=$(ip route get 8.8.8.8 | awk '/ src /{split(substr($0,index($0," src ")),f);print f[2];exit}') + echo "Generating templates with NVODS=${NVODS}, NLIVES=${NLIVES}" find "${DIR}" -maxdepth 1 -name "*.yaml" -exec rm -rf "{}" \; find "${DIR}" -maxdepth 1 -name "*.cfg" -exec rm -rf "{}" \; +rm -rf "$DIR/../../volume/video/hls" +rm -rf "$DIR/../../volume/video/dash" +mkdir -p "$DIR/../../volume/video/hls" +mkdir -p "$DIR/../../volume/video/dash" -for template in $(find 
"${DIR}" -maxdepth 1 -name "*.yaml.m4" -print); do - yaml=${template/.m4/} - m4 -DNVODS=${NVODS} -DREGISTRY_PREFIX=${REGISTRY} -I "${DIR}" "${template}" > "${yaml}" -done +export CDN_CPU_REQUEST=2 +export CDN_MEM_REQUEST=2000Mi +export REDIS_CPU_REQUEST=1 +export REDIS_MEM_REQUEST=500Mi +export ZOOKEEPER_CPU_REQUEST=1 +export ZOOKEEPER_MEM_REQUEST=500Mi +export KAFKA_CPU_REQUEST=1 +export KAFKA_MEM_REQUEST=500Mi +export VOD_CPU_REQUEST=3 +export VOD_MEM_REQUEST=3000Mi +export LIVE_CPU_REQUEST=4 +export LIVE_MEM_REQUEST=3000Mi -cat <> ${DIR}/cpu_mem_managerment.cfg -[cdn] -cpu = 2 -mem = 2000 -[redis] -cpu = 1 -mem = 500 -[zookeeper] -cpu = 1 -mem = 500 -[kafka] -cpu = 1 -mem = 500 -EOF +export STREAM_NAME=bbb_sunflower_1080p_30fps_normal.mp4 +export STREAM_WIDTH=856 +export STREAM_HEIGHT=480 +export STREAM_ENCODE_BITRATE=8M +export STREAM_ENCODE_FRAMERATE=30 +export STREAM_ENCODE_GOP=100 +export STREAM_ENCODE_MAXBFRAMES=2 +export STREAM_ENCODE_REFSNUM=2 +export STREAM_ENCODE_PRESET=veryfast +export STREAM_ENCODE_TYPE=AVC +export STREAM_ENCODE_HWACCEL=false +export STREAM_ENCODE_PROTOCOL=HLS +export STREAM_ENCODE_DENSITY=2 -for ((VODIDX=0;VODIDX<${NVODS};VODIDX++)); do - cat <> ${DIR}/cpu_mem_managerment.cfg -[vod${VODIDX}] -cpu = 3 -mem = 3000 -EOF - cat <> ${DIR}/vod-transcode.cfg -[vod${VODIDX}] -hwaccel = false -EOF +hosts=($(kubectl get node -l xeone3-zone!=yes -o jsonpath='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}:{range @.spec.taints[*]}{@.key}={@.effect};{end}{end}' | grep Ready=True | grep -v NoSchedule | cut -f1 -d':')) + +echo $hosts + +if test ${#hosts[@]} -eq 0; then + printf "\nFailed to locate worker node(s) for shared storage\n\n" + exit -1 +elif test ${#hosts[@]} -lt 2; then + hosts=(${hosts[0]} ${hosts[0]}) +fi + +export VIDEO_ARCHIVE_VOLUME_PATH=/tmp/archive/video +export VIDEO_ARCHIVE_VOLUME_SIZE=2 +export VIDEO_ARCHIVE_VOLUME_HOST=${hosts[1]} + +export VIDEO_CACHE_VOLUME_PATH=/tmp/cache/video 
+export VIDEO_CACHE_VOLUME_SIZE=2 +export VIDEO_CACHE_VOLUME_HOST=${hosts[1]} + +for template in $(find "${DIR}" -maxdepth 1 -name "*yaml.m4" -print); do + if [[ -n $(grep LIVEIDX "$template") ]]; then + for ((LIVEIDX=0;LIVEIDX<${NLIVES};LIVEIDX++)); do + yaml=${template/-deploy.yaml.m4/-${LIVEIDX}-deploy.yaml} + m4 -DLIVEIDX=${LIVEIDX} -DREGISTRY_PREFIX=${REGISTRY} -I "${DIR}" "${template}" > "${yaml}" + done + elif [[ -n $(grep NVODS "$template") ]] && [[ ${NVODS} -eq 0 ]]; then + continue + else + yaml=${template/.m4/} + m4 -DNVODS=${NVODS} -DHOSTIP=${HOSTIP} -DREGISTRY_PREFIX=${REGISTRY} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}" "${template}" > "${yaml}" + fi +done + +for template in $(find "${DIR}" -maxdepth 1 -name "*cfg.m4" -print); do + cfg=${template/.m4/} + m4 $(env | grep _REQUEST | sed 's/^/-D/') -I "${DIR}" "${template}" > "${cfg}" done for ((LIVEIDX=0;LIVEIDX<${NLIVES};LIVEIDX++)); do - cat <> ${DIR}/cpu_mem_managerment.cfg -[live${LIVEIDX}] -cpu = 4 -mem = 3000 + cat <> ${DIR}/cpu_mem_management.cfg + +[live-${LIVEIDX}] +cpu = ${LIVE_CPU_REQUEST} +mem = ${LIVE_MEM_REQUEST} EOF cat <> ${DIR}/live-transcode.cfg -[live${LIVEIDX}] -url = bbb_sunflower_1080p_30fps_normal.mp4 -width_height = 856x480 -bitrate = 8000000 -framerate = 25 -gop = 100 -maxbFrames = 2 -refsNum = 2 -rcMode = 0 -preset = veryfast -encoder_type = AVC -protocol = HLS -hwaccel = false -density = 2 +[live-${LIVEIDX}] +url = ${STREAM_NAME} +width = ${STREAM_WIDTH} +height = ${STREAM_HEIGHT} +bitrate = ${STREAM_ENCODE_BITRATE} +framerate = ${STREAM_ENCODE_FRAMERATE} +gop = ${STREAM_ENCODE_GOP} +maxbFrames = ${STREAM_ENCODE_MAXBFRAMES} +refsNum = ${STREAM_ENCODE_REFSNUM} +preset = ${STREAM_ENCODE_PRESET} +encodeType = ${STREAM_ENCODE_TYPE} +hwaccel = ${STREAM_ENCODE_HWACCEL} +protocol = ${STREAM_ENCODE_PROTOCOL} +density = ${STREAM_ENCODE_DENSITY} EOF done diff --git a/deployment/kubernetes/cdn-deploy.yaml.m4 b/deployment/kubernetes/cdn-deploy.yaml.m4 new file mode 100644 index 
0000000..e573d3c --- /dev/null +++ b/deployment/kubernetes/cdn-deploy.yaml.m4 @@ -0,0 +1,51 @@ +include(platform.m4) +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cdn + labels: + app: cdn +spec: + replicas: 1 + selector: + matchLabels: + app: cdn + template: + metadata: + labels: + app: cdn + spec: + enableServiceLinks: false + containers: + - name: cdn + image: defn(`REGISTRY_PREFIX')ovc_cdn_service:latest + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8443 + - containerPort: 1935 + resources: + limits: + cpu: 4 + memory: 4000Mi + requests: + cpu: 2 + memory: 2000Mi + volumeMounts: + - mountPath: /var/www/archive + name: video-archive + - mountPath: /var/www/video + name: video-cache + - mountPath: /var/run/secrets + name: self-signed-certificate + readOnly: true + volumes: + - name: video-archive + persistentVolumeClaim: + claimName: video-archive + - name: video-cache + persistentVolumeClaim: + claimName: video-cache + - name: self-signed-certificate + secret: + secretName: self-signed-certificate +PLATFORM_NODE_SELECTOR(`Xeon')dnl diff --git a/deployment/kubernetes/cdn-service-deployment.yaml.m4 b/deployment/kubernetes/cdn-service-deployment.yaml.m4 deleted file mode 100644 index e7a076b..0000000 --- a/deployment/kubernetes/cdn-service-deployment.yaml.m4 +++ /dev/null @@ -1,38 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: cdn-service - name: cdn-service -spec: - selector: - matchLabels: - app: cdn-service - replicas: 1 - template: - metadata: - creationTimestamp: null - labels: - app: cdn-service - spec: - containers: - - args: - - bash - - -c - - /home/main.py&/usr/local/sbin/nginx - image: defn(`REGISTRY_PREFIX')ovc_cdn_service:latest - imagePullPolicy: IfNotPresent - name: cdn-service - ports: - - containerPort: 8080 - - containerPort: 1935 - resources: - limits: - cpu: "3" - memory: 3145728e3 - requests: - cpu: 1500m - memory: 1572864e3 - nodeSelector: - kubernetes.io/hostname: master.machine - 
restartPolicy: Always diff --git a/deployment/kubernetes/cdn-service-service.yaml.m4 b/deployment/kubernetes/cdn-service-service.yaml.m4 deleted file mode 100644 index 14ee3aa..0000000 --- a/deployment/kubernetes/cdn-service-service.yaml.m4 +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - app: cdn-service - name: cdn-service -spec: - ports: - - name: "8080" - port: 8080 - targetPort: 8080 - - name: "1935" - port: 1935 - targetPort: 1935 - selector: - app: cdn-service - type: NodePort diff --git a/deployment/kubernetes/cdn-svc.yaml.m4 b/deployment/kubernetes/cdn-svc.yaml.m4 new file mode 100644 index 0000000..6d2e4c2 --- /dev/null +++ b/deployment/kubernetes/cdn-svc.yaml.m4 @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: cdn-service + labels: + app: cdn +spec: + ports: + - port: 443 + targetPort: 8443 + name: https + - port: 1935 + protocol: TCP + name: tcp + externalIPs: + - defn(`HOSTIP') + selector: + app: cdn diff --git a/deployment/kubernetes/cpu_mem_management.cfg.m4 b/deployment/kubernetes/cpu_mem_management.cfg.m4 new file mode 100644 index 0000000..7ae4246 --- /dev/null +++ b/deployment/kubernetes/cpu_mem_management.cfg.m4 @@ -0,0 +1,20 @@ +# Minimum resource requirements on container runtime, the maximum resource requirements is 2x minimum. 
+[cdn] +cpu = defn(`CDN_CPU_REQUEST') +mem = defn(`CDN_MEM_REQUEST') + +[redis] +cpu = defn(`REDIS_CPU_REQUEST') +mem = defn(`REDIS_MEM_REQUEST') + +[zookeeper] +cpu = defn(`ZOOKEEPER_CPU_REQUEST') +mem = defn(`ZOOKEEPER_MEM_REQUEST') + +[kafka] +cpu = defn(`KAFKA_CPU_REQUEST') +mem = defn(`KAFKA_MEM_REQUEST') + +[vod] +cpu = defn(`VOD_CPU_REQUEST') +mem = defn(`VOD_MEM_REQUEST') diff --git a/deployment/kubernetes/docker-compose-template.yml b/deployment/kubernetes/docker-compose-template.yml deleted file mode 100644 index 2680631..0000000 --- a/deployment/kubernetes/docker-compose-template.yml +++ /dev/null @@ -1,153 +0,0 @@ -version: '3.1' - -services: - - redis-service: - image: redis:latest - ports: - - "6379:6379" - command: - redis-server - deploy: - replicas: 1 - resources: - limits: - cpus: "2" - memory: 1000M - reservations: - cpus: "1" - memory: 500M - placement: - constraints: - - node.hostname == master.machine - - zookeeper-service: - image: zookeeper:latest - ports: - - "2181:2181" - environment: - ZOOKEEPER_SERVER_ID: 1 - ZOOKEEPER_CLIENT_PORT: '2181' - ZOOKEEPER_TICK_TIME: '2000' - ZOOKEEPER_HEAP_OPTS: '-Xmx2048m -Xms2048m' - ZOOKEEPER_MAX_CLIENT_CNXNS: '20000' - ZOOKEEPER_LOG4J_LOGGERS: 'zookeepr=ERROR' - ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: 'ERROR' - restart: always - deploy: - replicas: 1 - resources: - limits: - cpus: "2" - memory: 1000M - reservations: - cpus: "1" - memory: 500M - placement: - constraints: - - node.hostname == master.machine - labels: - kompose.service.type: NodePort - - kafka-service: - image: wurstmeister/kafka:latest - ports: - - "9092:9092" - depends_on: - - zookeeper-service - environment: - KAFKA_BROKER_ID: 1 - KAFKA_ADVERTISED_HOST_NAME: 'kafka-service' - KAFKA_ADVERTISED_PORT: '9092' - KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-service:2181' - KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka-service:9092' - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'PLAINTEXT:PLAINTEXT' - KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT' - 
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_DEFAULT_REPLICATION_FACTOR: 1 - KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' - KAFKA_NUM_PARTITIONS: 16 - KAFKA_CREATE_TOPICS: 'content_provider_sched:16:1' - KAFKA_LOG_RETENTION_HOURS: 8 - KAFKA_HEAP_OPTS: '-Xmx1024m -Xms1024m' - KAFKA_LOG4J_LOGGERS: 'kafka=ERROR,kafka.controller=ERROR,state.change.logger=ERROR,org.apache.kafka=ERROR' - KAFKA_LOG4J_ROOT_LOGLEVEL: 'ERROR' - restart: always - deploy: - replicas: 1 - resources: - limits: - cpus: "2" - memory: 1000M - reservations: - cpus: "1" - memory: 500M - placement: - constraints: - - node.hostname == master.machine - labels: - kompose.service.type: NodePort - - cdn-service: - image: ovc_cdn_service:latest - ports: - - "8080:8080" - - "1935:1935" - depends_on: - - kafka-service - deploy: - replicas: 1 - resources: - limits: - cpus: "3" - memory: 3000M - reservations: - cpus: "1.5" - memory: 1500M - placement: - constraints: - - node.hostname == master.machine - command: ["bash", "-c", "/home/main.py&/usr/local/sbin/nginx"] - labels: - kompose.service.type: NodePort - kompose.image-pull-policy: IfNotPresent - - vod-service: - image: ovc_software_transcode_service:latest - deploy: - replicas: 1 - resources: - limits: - cpus: "6" - memory: 6000M - reservations: - cpus: "3" - memory: 3000M - placement: - constraints: - - node.hostname == master.machine - depends_on: - - kafka-service - - zookeeper-service - command: ["bash", "-c", "/home/main.py"] - labels: - kompose.image-pull-policy: IfNotPresent - - live-service: - image: ovc_software_transcode_service:latest - deploy: - replicas: 1 - resources: - limits: - cpus: "6" - memory: 6000M - reservations: - cpus: "3" - memory: 3000M - placement: - constraints: - - node.hostname == master.machine - depends_on: - - cdn-service - labels: - kompose.image-pull-policy: IfNotPresent diff --git a/deployment/kubernetes/frame_config.py b/deployment/kubernetes/frame_config.py deleted file mode 100755 index a743df3..0000000 --- 
a/deployment/kubernetes/frame_config.py +++ /dev/null @@ -1,495 +0,0 @@ -#!/usr/bin/python3 - -import wx -import wx.xrc -import re -import threading -import os -import sys -from functools import reduce -import update_yaml - -class MyFrame_Config (wx.Frame): - - def __init__(self, parent, nfs_server, volume_directory, video_list): - self.nfs_server = nfs_server - self.volume_directory = volume_directory - self.video_list = video_list - - self.choice_list = [] - self.node_dict = {} - basic_info = os.popen("kubectl describe node").read() - index_list = [i.start() for i in re.finditer("Name:", basic_info)] - for i in range(len(index_list)): - cpu_info = re.findall( - "(\d+)", os.popen("kubectl describe node | awk -F ' ' '$1==\"cpu\"' |awk 'NR==" + str(i+1) + "'").read()) - memory_info = re.findall( - "(\d+)", os.popen("kubectl describe node | awk -F ' ' '$1==\"memory\" {print $0}'").read()) - cpu = int(int(re.search( - "cpu:\s+(\d+)", basic_info[index_list[i]: -1]).group(1)) - int(cpu_info[0])/1000) - memory = int((int(re.search( - "memory:\s+(\d+)", basic_info[index_list[i]: -1]).group(1)) / 1024 - int(memory_info[0]))) - if cpu > 0 and memory > 0: - self.choice_list.append({"nodename": re.search( - "Name:\s+(.+)", basic_info[index_list[i]: -1]).group(1), "cpu": cpu, "memory": memory}) - self.node_dict[re.search( - "Name:\s+(.+)", basic_info[index_list[i]: -1]).group(1)] = {"cpu": cpu, "memory": memory} - - self.setsize_num = 0 - self.live_num = 0 - self.vod_num = 0 - wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=u"config", pos=wx.DefaultPosition, size=wx.Size( - 924, 525), style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL) - self.Bind(wx.EVT_CLOSE, self.OnClose) - - self.SetSizeHints(wx.DefaultSize, wx.DefaultSize) - self.SetBackgroundColour( - wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW)) - - frame_bSizer = wx.BoxSizer(wx.VERTICAL) - - self.config_panel = wx.Panel( - self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.BORDER_RAISED) - config_bSizer = 
wx.BoxSizer(wx.HORIZONTAL) - - self.pod_panel = wx.ScrolledWindow( - self.config_panel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.HSCROLL | wx.VSCROLL) - self.pod_panel.SetScrollRate(5, 5) - pod_bSizer = wx.BoxSizer(wx.VERTICAL) - - self.modulelist = ["cdn", "redis", "zookeeper", "kafka", "vod", "live"] - self.creat_modules_button(pod_bSizer) - - self.pod_panel.SetSizer(pod_bSizer) - self.pod_panel.Layout() - pod_bSizer.Fit(self.pod_panel) - config_bSizer.Add(self.pod_panel, 1, wx.ALL | wx.EXPAND, 5) - - self.arguments_panel = wx.Panel( - self.config_panel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL) - arguments_bSizer = wx.BoxSizer(wx.VERTICAL) - - arguments_label = wx.StaticBoxSizer(wx.StaticBox( - self.arguments_panel, wx.ID_ANY, u"cdn_config"), wx.VERTICAL) - self.arguments_label = arguments_label - - self.pods_dict = {"cdn": {}, "redis": {}, - "zookeeper": {}, "kafka": {}, "vod": {}, "live": {}} - for key, value in self.pods_dict.items(): - self.create_pod_panel(key, arguments_label) - - arguments_bSizer.Add(arguments_label, 1, wx.EXPAND, 5) - - self.arguments_panel.SetSizer(arguments_bSizer) - self.arguments_panel.Layout() - arguments_bSizer.Fit(self.arguments_panel) - config_bSizer.Add(self.arguments_panel, 1, wx.EXPAND | wx.ALL, 5) - - self.config_panel.SetSizer(config_bSizer) - self.config_panel.Layout() - config_bSizer.Fit(self.config_panel) - frame_bSizer.Add(self.config_panel, 1, wx.EXPAND | wx.ALL, 5) - - self.menu_panel = wx.Panel( - self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL) - self.menu_panel.SetMaxSize(wx.Size(-1, 50)) - - menu_bSizer = wx.BoxSizer(wx.VERTICAL) - - menu_sdbSizer = wx.StdDialogButtonSizer() - self.menu_sdbSizerOK = wx.Button(self.menu_panel, wx.ID_OK) - menu_sdbSizer.AddButton(self.menu_sdbSizerOK) - self.menu_sdbSizerCancel = wx.Button(self.menu_panel, wx.ID_CANCEL) - menu_sdbSizer.AddButton(self.menu_sdbSizerCancel) - menu_sdbSizer.Realize() - - menu_bSizer.Add(menu_sdbSizer, 
1, wx.ALIGN_CENTER_HORIZONTAL, 5) - - self.menu_panel.SetSizer(menu_bSizer) - self.menu_panel.Layout() - menu_bSizer.Fit(self.menu_panel) - frame_bSizer.Add(self.menu_panel, 1, wx.ALL | wx.EXPAND, 5) - - self.log_panel = wx.Panel( - self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL) - log_bSizer = wx.BoxSizer(wx.VERTICAL) - - self.log_textCtrl = wx.TextCtrl( - self.log_panel, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_MULTILINE) - log_bSizer.Add(self.log_textCtrl, 1, wx.ALL | wx.EXPAND, 5) - - self.log_panel.SetSizer(log_bSizer) - self.log_panel.Layout() - log_bSizer.Fit(self.log_panel) - frame_bSizer.Add(self.log_panel, 1, wx.EXPAND | wx.ALL, 5) - - self.SetSizer(frame_bSizer) - self.Layout() - - self.Centre(wx.VERTICAL) - - self.menu_sdbSizerCancel.Bind( - wx.EVT_BUTTON, self.menu_sdbSizerOnCancelButtonClick) - self.menu_sdbSizerOK.Bind( - wx.EVT_BUTTON, self.menu_sdbSizerOnOKButtonClick) - - MyFrame_Config.show_pod_panel("cdn")(self, wx.EVT_BUTTON) - - def __del__(self): - pass - - def OnClose(self, event): - sys.exit(1) - - def creat_modules_button(self, pod_bSizer): - for i in self.modulelist: - setattr(self, i + "_button", wx.Button(self.pod_panel, - wx.ID_ANY, i, wx.DefaultPosition, wx.DefaultSize, 0)) - pod_bSizer.Add(getattr(self, i + "_button"), 0, wx.ALL, 5) - - def create_pod_panel(self, podname, arguments_label): - if podname == "live" or podname == "vod": - self.creat_module_panel(arguments_label, podname) - self.pods_dict[podname] = { - 'node': None, 'cpu': None, 'memory': None} - else: - setattr(self, podname + "_panel", wx.ScrolledWindow(arguments_label.GetStaticBox(), - wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.HSCROLL | wx.VSCROLL)) - getattr(self, podname + "_panel").SetScrollRate(5, 5) - globals()[podname + "_bSizer"] = wx.BoxSizer(wx.VERTICAL) - for j in ["node", "cpu", "memory"]: - setattr(self, podname + "_" + j + "_panel", wx.Panel(getattr(self, podname + - "_panel"), wx.ID_ANY, 
wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)) - globals()[podname + "_" + j + - "_gSizer"] = wx.GridSizer(0, 2, 0, 0) - setattr(self, podname + "_" + j + "_staticText", wx.StaticText(getattr(self, - podname + "_" + j + "_panel"), wx.ID_ANY, j, wx.DefaultPosition, wx.DefaultSize, 0)) - getattr(self, podname + "_" + j + "_staticText").Wrap(-1) - globals()[podname + "_" + j + "_gSizer"].Add(getattr(self, - podname + "_" + j + "_staticText"), 0, wx.ALL, 5) - globals()[podname + "_" + "node" + "_choiceChoices"] = [node_dict["nodename"] - for node_dict in self.choice_list] - - globals()[podname + "_" + j + "_choiceChoices"] = [item["nodename"] - for item in self.choice_list] if j == "node" else [] - setattr(self, podname + "_" + j + "_choice", wx.Choice(getattr(self, podname + "_" + j + "_panel"), - wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, globals()[podname + "_" + j + "_choiceChoices"], 0)) - getattr(self, podname + "_" + j + "_choice").SetSelection(-1) - globals()[podname + "_" + j + "_gSizer"].Add(getattr(self, - podname + "_" + j + "_choice"), 0, wx.ALL, 5) - getattr(self, podname + "_" + j + "_panel").SetSizer(globals() - [podname + "_" + j + "_gSizer"]) - getattr(self, podname + "_" + j + "_panel").Layout() - globals()[ - podname + "_" + j + "_gSizer"].Fit(getattr(self, podname + "_" + j + "_panel")) - globals()[podname + "_bSizer"].Add(getattr(self, - podname + "_" + j + "_panel"), 1, wx.EXPAND | wx.ALL, 5) - self.pods_dict[podname][j] = None - - if re.search("[live|vod]\d", podname): - self.choice_dict = {"input": self.video_list, "mode": {"sw": ["AVC", "HEVC", "AV1"], "hw": ["AVC", "HEVC"]}, "protocol": [ - "HLS", "DASH"], "resolution": ["856:480", "1280:720", "1920:1080", "2560:1440"], "bitrate": [str(i+5) for i in range(15)]} - panel_list = ["mode", "input"] if re.search( - "live\d", podname) else ["mode"] - - self.pods_dict[podname]["mode"] = None - if re.search("live\d", podname): - self.pods_dict[podname]['input'] = None - for num in 
range(4): - self.pods_dict[podname]['transcode' + str(num)] = { - "codec": None, "protocol": None, "resolution": None, "bitrate": None, "output": None} - - for panel_name in panel_list: - setattr(self, podname + "_" + panel_name + "_panel", wx.Panel(getattr(self, podname + - "_panel"), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)) - globals()[podname + "_" + panel_name + - "_gSizer"] = wx.GridSizer(0, 2, 0, 0) - - setattr(self, podname + "_" + panel_name + "_staticText", wx.StaticText(getattr(self, podname + - "_" + panel_name + "_panel"), wx.ID_ANY, panel_name, wx.DefaultPosition, wx.DefaultSize, 0)) - getattr(self, podname + "_" + - panel_name + "_staticText").Wrap(-1) - - globals()[podname + "_" + panel_name + "_gSizer"].Add(getattr(self, - podname + "_" + panel_name + "_staticText"), 0, wx.ALL, 5) - - globals()[podname + "_" + panel_name + - "_choiceChoices"] = self.choice_dict[panel_name] if panel_name == "input" else ["SW", "HW"] - setattr(self, podname + "_" + panel_name + "_choice", wx.Choice(getattr(self, podname + "_" + panel_name + "_panel"), - wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, globals()[podname + "_" + panel_name + "_choiceChoices"], 0)) - getattr(self, podname + "_" + panel_name + - "_choice").SetSelection(-1) - globals()[podname + "_" + panel_name + "_gSizer"].Add(getattr(self, - podname + "_" + panel_name + "_choice"), 0, wx.ALL, 5) - - getattr(self, podname + "_" + panel_name + "_panel").SetSizer( - globals()[podname + "_" + panel_name + "_gSizer"]) - getattr(self, podname + "_" + - panel_name + "_panel").Layout() - globals()[podname + "_" + panel_name + "_gSizer"].Fit( - getattr(self, podname + "_" + panel_name + "_panel")) - globals()[podname + "_bSizer"].Add(getattr(self, podname + - "_" + panel_name + "_panel"), 1, wx.EXPAND | wx.ALL, 5) - - if panel_name == "mode" and re.search("live\d", podname): - setattr(MyFrame_Config, podname + "_mode_choiceOnChoice", - MyFrame_Config.mode_choiceOnChoice(podname)) - 
getattr(self, podname + "_mode_choice").Bind(wx.EVT_CHOICE, - getattr(self, podname + "_mode_choiceOnChoice")) - - if re.search("live\d", podname): - for num in range(4): - setattr(self, podname + "_transcode" + str(num) + "_panel", wx.Panel(getattr( - self, podname + "_panel"), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)) - globals()[podname + "_transcode" + str(num) + - "_bSizer"] = wx.BoxSizer(wx.HORIZONTAL) - - for option in ["codec", "protocol", "resolution", "bitrate"]: - if option == "codec": - globals()["%s_transcode%d_%s_choiceChoices" % ( - podname, num, option)] = [] - else: - globals()["%s_transcode%d_%s_choiceChoices" % ( - podname, num, option)] = self.choice_dict[option] - - setattr(self, podname + "_transcode" + str(num) + "_" + option + "_choice", wx.Choice(getattr(self, podname + "_transcode" + str( - num) + "_panel"), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, globals()["%s_transcode%d_%s_choiceChoices" % (podname, num, option)], 0)) - - getattr(self, "%s_transcode%d_%s_choice" % - (podname, num, option)).SetSelection(-1) - globals()[podname + "_transcode" + str(num) + "_bSizer"].Add(getattr( - self, "%s_transcode%d_%s_choice" % (podname, num, option)), 0, wx.ALL, 5) - - setattr(self, podname + "_transcode" + str(num) + "_output_textCtrl", wx.TextCtrl(getattr(self, podname + - "_transcode" + str(num) + "_panel"), wx.ID_ANY, "output_name", wx.DefaultPosition, wx.DefaultSize, 0)) - globals()[podname + "_transcode" + str(num) + "_bSizer"].Add(getattr( - self, podname + "_transcode" + str(num) + "_output_textCtrl"), 0, wx.ALL, 5) - - getattr(self, podname + "_transcode" + str(num) + "_panel").SetSizer( - globals()[podname + "_transcode" + str(num) + "_bSizer"]) - getattr(self, podname + "_transcode" + - str(num) + "_panel").Layout() - globals()[podname + "_transcode" + str(num) + "_bSizer"].Fit( - getattr(self, podname + "_transcode" + str(num) + "_panel")) - globals()[podname + "_bSizer"].Add(getattr(self, podname + - 
"_transcode" + str(num) + "_panel"), 1, wx.EXPAND | wx.ALL, 5) - - globals()[podname + "_sdbSizer"] = wx.StdDialogButtonSizer() - setattr(self, podname + "_sdbSizerSave", - wx.Button(getattr(self, podname + "_panel"), wx.ID_SAVE)) - globals()[ - podname + "_sdbSizer"].AddButton(getattr(self, podname + "_sdbSizerSave")) - setattr(self, podname + "_sdbSizerCancel", - wx.Button(getattr(self, podname + "_panel"), wx.ID_CANCEL)) - globals()[podname + "_sdbSizer"].AddButton(getattr(self, - podname + "_sdbSizerCancel")) - globals()[podname + "_sdbSizer"].Realize() - globals()[podname + "_bSizer"].Add(globals() - [podname + "_sdbSizer"], 1, wx.EXPAND, 5) - - setattr(MyFrame_Config, podname + "_sdbSizerOnSaveButtonClick", - MyFrame_Config.show_pod_panel(re.split("\d+", podname)[0])) - getattr(self, podname + "_sdbSizerSave").Bind(wx.EVT_BUTTON, - getattr(self, podname + "_sdbSizerOnSaveButtonClick")) - setattr(MyFrame_Config, podname + "_sdbSizerOnCancelButtonClick", - MyFrame_Config.cancel_pod_panel(podname)) - getattr(self, podname + "_sdbSizerCancel").Bind(wx.EVT_BUTTON, - getattr(self, podname + "_sdbSizerOnCancelButtonClick")) - - setattr(MyFrame_Config, podname + "_node_choiceOnChoice", - MyFrame_Config.node_choiceOnChoice(podname)) - getattr(self, podname + "_node_choice").Bind(wx.EVT_CHOICE, - getattr(self, podname + "_node_choiceOnChoice")) - - getattr(self, podname + "_panel").SetSizer(globals() - [podname + "_bSizer"]) - getattr(self, podname + "_panel").Layout() - globals()[podname + "_bSizer"].Fit(getattr(self, podname + "_panel")) - arguments_label.Add( - getattr(self, podname + "_panel"), 1, wx.EXPAND | wx.ALL, 5) - getattr(self, podname + "_panel").Hide() - - setattr(MyFrame_Config, podname + "_buttonOnButtonClick", - MyFrame_Config.show_pod_panel(podname)) - getattr(self, podname + "_button").Bind(wx.EVT_BUTTON, - getattr(self, podname + "_buttonOnButtonClick")) - - def loginfo(self): - self.node_info = {} - for key, value in self.pods_dict.items(): - if 
not (key == "live" or key == "vod"): - getattr(self, key + "_button").SetBackgroundColour("#00FFFF") - for i in value.keys(): - if i.find("transcode") == -1: - value[i] = getattr( - self, key + "_" + i + "_choice").GetStringSelection() - else: - globals()[key + i + "_isready"] = True - for option in value[i].keys(): - if option == "output": - value[i][option] = getattr( - self, key + "_" + i + "_output_textCtrl").GetValue() - else: - value[i][option] = getattr( - self, key + "_" + i + "_" + option + "_choice").GetStringSelection() - if len(value[i][option]) == 0: - globals()[key + i + "_isready"] = False - - if len(value[i]) == 0 or value[i] == "0": - getattr( - self, key + "_button").SetBackgroundColour("#FFFFFF") - - if re.search("live\d", key) and not (globals()[key + "transcode0_isready"] or globals()[key + "transcode1_isready"] or globals()[key + "transcode2_isready"] or globals()[key + "transcode3_isready"]): - getattr(self, key + "_button").SetBackgroundColour("#FFFFFF") - - for node in self.choice_list: - self.node_info[node["nodename"]] = { - "modules": [], "cpu": 0, "memory": 0} - for key, value in self.pods_dict.items(): - if value["node"] == node["nodename"] and getattr(self, key + "_button").GetBackgroundColour() == "#00FFFF": - self.node_info[node["nodename"]]["modules"].append(key) - self.node_info[node["nodename"]]["cpu"] += float( - value["cpu"]) if len(value["cpu"]) > 0 else 0 - self.node_info[node["nodename"]]["memory"] += int( - value["memory"]) if len(value["memory"]) > 0 else 0 - - text_info = "" - for item in self.choice_list: - text_info += "Name: %s\nPods: %s\nCPU: capacity: %-10d used: %.1f\nMEMORY: capacity: %-8d used: %8d\n" % (item['nodename'], reduce( - lambda x, y: x + ' ' + y, self.node_info[item['nodename']]['modules']) if len(self.node_info[item['nodename']]['modules']) else None, item['cpu'], self.node_info[item['nodename']]['cpu'], item['memory'], self.node_info[item['nodename']]['memory']) - text_info += "ERROR cpu undercapacity 
\n" if item['cpu'] < self.node_info[item['nodename']]['cpu'] else "" - text_info += "ERROR memory undercapacity \n" if item['memory'] < self.node_info[item['nodename']]['memory'] else "" - text_info += "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n\n" - - self.log_textCtrl.AppendText(text_info) - - - @staticmethod - def node_choiceOnChoice(podname): - def fun(self, event): - getattr(self, podname + "_cpu_choice").SetItems([["0.5"] + [str(num) for num in range(1, node["cpu"])] - for node in self.choice_list if node["nodename"] == getattr(self, podname + "_node_choice").GetStringSelection()][0]) - getattr(self, podname + "_memory_choice").SetItems([["500"] + [str(num) for num in range(1000, node["memory"], 500)] - for node in self.choice_list if node["nodename"] == getattr(self, podname + "_node_choice").GetStringSelection()][0]) - return fun - - @staticmethod - def mode_choiceOnChoice(podname): - def fun(self, event): - codec_list = ["AVC", "HEVC"] if getattr( - self, podname + "_mode_choice").GetStringSelection() == "HW" else ["AVC", "HEVC", "AV1"] - for num in range(4): - getattr(self, "%s_transcode%d_codec_choice" % - (podname, num)).SetItems(codec_list) - return fun - - @staticmethod - def cancel_pod_panel(podname): - def fun(self, event): - for i in ["node", "cpu", "memory", "mode", "input"]: - getattr(self, podname + "_" + i + "_choice").SetSelection(-1) - if re.search("live\d", podname): - for num in range(4): - for option in ["codec", "protocol", "resolution", "bitrate"]: - getattr(self, "%s_transcode%d_%s_choice" % - (podname, num, option)).SetSelection(-1) - self.loginfo() - return fun - - @staticmethod - def show_pod_panel(podname): - def fun(self, event): - for key, value in self.pods_dict.items(): - try: - getattr(self, key + "_panel").Hide() - except: - pass - self.arguments_label.StaticBox.SetLabel(podname + "_config") - getattr(self, podname + "_panel").Show() - if re.search("[(vod)|(live)]\d", podname): - self.setsize() - self.loginfo() - return fun - - def 
setsize(self): - self.arguments_panel.SetSize(self.arguments_panel.GetSize( - )[0] + (-1 if self.setsize_num % 2 else 1), self.arguments_panel.GetSize()[1]) - self.setsize_num += 1 - - @staticmethod - def creat_buttonOnButtonClick(modulename): - def fun(self, event): - setattr(self, modulename + str(getattr(self, modulename + "_num")) + "_button", wx.Button(getattr(self, modulename + "_scrolledWindow"), - wx.ID_ANY, modulename + str(getattr(self, modulename + "_num")), (20, getattr(self, modulename + "_num") * 60), wx.DefaultSize, 0)) - getattr(self, modulename + "_list_wSizer").Add(getattr(self, modulename + - str(getattr(self, modulename + "_num")) + "_button"), 0, wx.ALL, 5) - self.pods_dict[modulename + - str(getattr(self, modulename + "_num"))] = {} - self.create_pod_panel( - modulename + str(getattr(self, modulename + "_num")), self.arguments_label) - setattr(self, modulename + "_num", - getattr(self, modulename + "_num") + 1) - self.setsize() - return fun - - def creat_module_panel(self, arguments_label, modulename): - setattr(self, modulename + "_panel", wx.ScrolledWindow(arguments_label.GetStaticBox(), - wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.HSCROLL | wx.VSCROLL)) - getattr(self, modulename + "_panel").SetScrollRate(5, 5) - globals()[modulename + "_bSizer"] = wx.BoxSizer(wx.VERTICAL) - - setattr(self, modulename + "_scrolledWindow", wx.ScrolledWindow(getattr(self, modulename + - "_panel"), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.HSCROLL | wx.VSCROLL)) - getattr(self, modulename + "_scrolledWindow").SetScrollRate(5, 5) - globals()[modulename + "_list_wSizer"] = wx.WrapSizer(wx.HORIZONTAL, - wx.WRAPSIZER_DEFAULT_FLAGS) - setattr(self, modulename + "_list_wSizer", - globals()[modulename + "_list_wSizer"]) - - getattr(self, modulename + "_scrolledWindow").SetSizer(globals() - [modulename + "_list_wSizer"]) - getattr(self, modulename + "_scrolledWindow").Layout() - globals()[modulename + "_list_wSizer"].Fit(getattr(self, - modulename + 
"_scrolledWindow")) - globals()[modulename + "_bSizer"].Add(getattr(self, - modulename + "_scrolledWindow"), 1, wx.EXPAND | wx.ALL, 5) - - setattr(self, modulename + "_creat_button", wx.Button(getattr(self, modulename + - "_panel"), wx.ID_ANY, u"creat", wx.DefaultPosition, wx.DefaultSize, 0)) - globals()[modulename + "_bSizer"].Add(getattr(self, - modulename + "_creat_button"), 0, wx.ALL, 5) - setattr(self, modulename + "_bSizer", - globals()[modulename + "_bSizer"]) - - getattr(self, modulename + "_panel").SetSizer(globals() - [modulename + "_bSizer"]) - getattr(self, modulename + "_panel").Layout() - globals()[modulename + "_bSizer"].Fit(getattr(self, modulename + "_panel")) - arguments_label.Add( - getattr(self, modulename + "_panel"), 1, wx.EXPAND | wx.ALL, 5) - - setattr(MyFrame_Config, modulename + "_creat_buttonOnButtonClick", - MyFrame_Config.creat_buttonOnButtonClick(modulename)) - getattr(self, modulename + "_creat_button").Bind(wx.EVT_BUTTON, - getattr(self, modulename + "_creat_buttonOnButtonClick")) - - def menu_sdbSizerOnCancelButtonClick(self, event): - sys.exit(1) - - def menu_sdbSizerOnOKButtonClick(self, event): - self.loginfo() - pods = [] - for key, value in self.node_info.items(): - for pod in value["modules"]: - if getattr(self, pod + "_button").GetBackgroundColour() == (0, 255, 255, 255): - pods.extend(value["modules"]) - pods = list(set(pods)) - - for module in ["cdn", "redis", "zookeeper", "kafka"]: - if module not in pods: - self.log_textCtrl.AppendText(module + " not config\n") - return - - update_yaml.update_yaml(nfs_server=self.nfs_server, volume_directory=self.volume_directory, dir_path = sys.argv[1], - pods=pods, pods_dict=self.pods_dict, node_dict=self.node_dict) - self.Destroy() diff --git a/deployment/kubernetes/frame_index.py b/deployment/kubernetes/frame_index.py deleted file mode 100755 index 501ecb1..0000000 --- a/deployment/kubernetes/frame_index.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/python3 - -import wx -import 
wx.xrc -import re -import os -import sys -import functools -from frame_config import MyFrame_Config - -class MyFrame_Index (wx.Frame): - - def __init__(self, parent): - self.errwin = wx.MessageDialog( - parent=None, - message=u"Invalid parameter, Please input a integer ...", - caption=u"ERROR", - style=wx.OK) - - wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=u"NFS Server Configuration", - pos=wx.DefaultPosition, size=wx.Size(500, 300), style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL) - self.Bind(wx.EVT_CLOSE, self.OnClose) - - self.SetSizeHints(wx.DefaultSize, wx.DefaultSize) - - gSizer1 = wx.GridSizer(0, 2, 0, 0) - - self.m_staticText1 = wx.StaticText( - self, wx.ID_ANY, u"IP address:", wx.DefaultPosition, wx.DefaultSize, 0) - self.m_staticText1.Wrap(-1) - - gSizer1.Add(self.m_staticText1, 0, wx.ALIGN_CENTER_HORIZONTAL | - wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5) - - self.m_textCtrl1 = wx.TextCtrl( - self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0) - gSizer1.Add(self.m_textCtrl1, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5) - - self.m_staticText3 = wx.StaticText( - self, wx.ID_ANY, u"Username:", wx.DefaultPosition, wx.DefaultSize, 0) - self.m_staticText3.Wrap(-1) - - gSizer1.Add(self.m_staticText3, 0, wx.ALIGN_CENTER_HORIZONTAL | - wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5) - - self.m_textCtrl2 = wx.TextCtrl( - self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0) - gSizer1.Add(self.m_textCtrl2, 0, wx.ALL, 5) - - self.m_staticText4 = wx.StaticText( - self, wx.ID_ANY, u"Password:", wx.DefaultPosition, wx.DefaultSize, 0) - self.m_staticText4.Wrap(-1) - - gSizer1.Add(self.m_staticText4, 0, wx.ALIGN_CENTER_HORIZONTAL | - wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5) - - self.m_textCtrl3 = wx.TextCtrl( - self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_PASSWORD) - gSizer1.Add(self.m_textCtrl3, 0, wx.ALL, 5) - - self.m_staticText5 = wx.StaticText( - self, wx.ID_ANY, u"Project directory path:", wx.DefaultPosition, 
wx.DefaultSize, 0) - self.m_staticText5.Wrap(-1) - - gSizer1.Add(self.m_staticText5, 0, wx.ALIGN_CENTER_HORIZONTAL | - wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5) - - self.m_textCtrl4 = wx.TextCtrl( - self, wx.ID_ANY, wx.EmptyString, wx.Point(-1, -1), wx.DefaultSize, 0) - gSizer1.Add(self.m_textCtrl4, 0, wx.ALL, 5) - - self.m_button1 = wx.Button( - self, wx.ID_ANY, u"cancel", wx.DefaultPosition, wx.DefaultSize, 0) - - self.Bind(wx.EVT_BUTTON, self.cancelEvent, self.m_button1) - gSizer1.Add(self.m_button1, 0, wx.ALIGN_RIGHT | wx.ALL, 5) - - self.m_button2 = wx.Button( - self, wx.ID_ANY, u"save", wx.DefaultPosition, wx.DefaultSize, 0) - self.Bind(wx.EVT_BUTTON, self.sureEvent, self.m_button2) - gSizer1.Add(self.m_button2, 0, wx.ALL, 5) - - self.SetSizer(gSizer1) - self.Layout() - - self.Centre(wx.BOTH) - - def __del__(self): - pass - - def OnClose(self, event): - sys.exit(1) - - def sureEvent(self, event): - self.nfs_server = self.m_textCtrl1.GetValue() - self.username = self.m_textCtrl2.GetValue() - self.password = self.m_textCtrl3.GetValue() - self.volume_directory = self.m_textCtrl4.GetValue() - - check_info = self.check_info() - if not check_info == "OK": - self.errwin.SetMessage(check_info) - self.errwin.ShowModal() - return - - try: - exec_cmd = os.popen("fab -u %s -p %s -H %s -- 'ls %s'" % (self.username, self.password, - self.nfs_server, os.path.join(self.volume_directory, "volume/video/archive"))) - result = [re.findall(r'[^\\\s/:\*\?"<>\|]+', i) - for i in re.findall(r'out:(.+)\n', exec_cmd.read())] - video_list = [i for i in functools.reduce( - lambda x, y:x+y, result) if os.path.splitext(i)[1] == '.mp4'] - except: - self.errwin.SetMessage("connect error") - self.errwin.ShowModal() - return - - if len(video_list) == 0: - self.errwin.SetMessage("no video") - self.errwin.ShowModal() - return - - self.Destroy() - frame_config = MyFrame_Config(None, nfs_server=self.nfs_server, - volume_directory=self.volume_directory, video_list=video_list) - frame_config.Show(True) 
- - def cancelEvent(self, event): - sys.exit(1) - - def check_info(self): - if not re.match("((25[0-5]|2[0-4]\d|((1\d{2})|([1-9]?\d)))\.){3}(25[0-5]|2[0-4]\d|((1\d{2})|([1-9]?\d)))$", self.nfs_server): - return "ip is error" - for info in ["username", "password", "volume_directory"]: - if not re.search("^([\w_\-\&/]+)+$", getattr(self, info)): - return info + " error" - - if not os.path.isabs(self.volume_directory): - return "not abs" - elif re.match(".+/$", self.volume_directory): - self.volume_directory = self.volume_directory[:-1] - - return "OK" diff --git a/deployment/kubernetes/kafka-deploy.yaml.m4 b/deployment/kubernetes/kafka-deploy.yaml.m4 new file mode 100644 index 0000000..c2d6268 --- /dev/null +++ b/deployment/kubernetes/kafka-deploy.yaml.m4 @@ -0,0 +1,61 @@ +include(platform.m4) +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka + labels: + app: kafka +spec: + replicas: 1 + selector: + matchLabels: + app: kafka + template: + metadata: + labels: + app: kafka + spec: + enableServiceLinks: false + containers: + - name: kafka + image: wurstmeister/kafka:2.12-2.4.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9092 + env: + - name: "KAFKA_BROKER_ID" + value: "1" + - name: "KAFKA_ZOOKEEPER_CONNECT" + value: "zookeeper-service:2181" + - name: "KAFKA_LISTENERS" + value: "PLAINTEXT://:9092" + - name: "KAFKA_ADVERTISED_LISTENERS" + value: "PLAINTEXT://kafka-service:9092" + - name: "KAFKA_LISTENER_SECURITY_PROTOCOL_MAP" + value: "PLAINTEXT:PLAINTEXT" + - name: "KAFKA_INTER_BROKER_LISTENER_NAME" + value: "PLAINTEXT" + - name: "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR" + value: "1" + - name: "KAFKA_DEFAULT_REPLICATION_FACTOR" + value: "1" + - name: "KAFKA_AUTO_CREATE_TOPICS_ENABLE" + value: "true" + - name: KAFKA_CREATE_TOPICS + value: content_provider_sched:16:1 + - name: "KAFKA_NUM_PARTITIONS" + value: "16" + - name: "KAFKA_LOG_RETENTION_MINUTES" + value: "30" + - name: "KAFKA_HEAP_OPTS" + value: "-Xmx1024m -Xms1024m" + - name: 
"KAFKA_LOG4J_ROOT_LOGLEVEL" + value: "ERROR" + resources: + requests: + cpu: 1 + memory: 500Mi + limits: + cpu: 2 + memory: 1000Mi +PLATFORM_NODE_SELECTOR(`Xeon')dnl diff --git a/deployment/kubernetes/kafka-service-deployment.yaml.m4 b/deployment/kubernetes/kafka-service-deployment.yaml.m4 deleted file mode 100644 index a7b372f..0000000 --- a/deployment/kubernetes/kafka-service-deployment.yaml.m4 +++ /dev/null @@ -1,64 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: kafka-service - name: kafka-service -spec: - selector: - matchLabels: - app: kafka-service - replicas: 1 - template: - metadata: - labels: - app: kafka-service - spec: - containers: - - env: - - name: KAFKA_ADVERTISED_HOST_NAME - value: kafka-service - - name: KAFKA_ADVERTISED_LISTENERS - value: PLAINTEXT://kafka-service:9092 - - name: KAFKA_ADVERTISED_PORT - value: "9092" - - name: KAFKA_AUTO_CREATE_TOPICS_ENABLE - value: "true" - - name: KAFKA_BROKER_ID - value: "1" - - name: KAFKA_CREATE_TOPICS - value: content_provider_sched:16:1 - - name: KAFKA_DEFAULT_REPLICATION_FACTOR - value: "1" - - name: KAFKA_HEAP_OPTS - value: -Xmx1024m -Xms1024m - - name: KAFKA_INTER_BROKER_LISTENER_NAME - value: PLAINTEXT - - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP - value: PLAINTEXT:PLAINTEXT - - name: KAFKA_LOG4J_LOGGERS - value: kafka=ERROR,kafka.controller=ERROR,state.change.logger=ERROR,org.apache.kafka=ERROR - - name: KAFKA_LOG4J_ROOT_LOGLEVEL - value: ERROR - - name: KAFKA_LOG_RETENTION_HOURS - value: "8" - - name: KAFKA_NUM_PARTITIONS - value: "16" - - name: KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR - value: "1" - - name: KAFKA_ZOOKEEPER_CONNECT - value: zookeeper-service:2181 - image: wurstmeister/kafka:latest - name: kafka-service - ports: - - containerPort: 9092 - resources: - limits: - cpu: "2" - memory: 1048576e3 - requests: - cpu: "1" - memory: 524288e3 - nodeSelector: - kubernetes.io/hostname: master.machine - restartPolicy: Always diff --git 
a/deployment/kubernetes/kafka-service-service.yaml.m4 b/deployment/kubernetes/kafka-service-service.yaml.m4 deleted file mode 100644 index a729a70..0000000 --- a/deployment/kubernetes/kafka-service-service.yaml.m4 +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - app: kafka-service - name: kafka-service -spec: - ports: - - name: "9092" - port: 9092 - targetPort: 9092 - selector: - app: kafka-service - type: NodePort diff --git a/deployment/kubernetes/kafka-svc.yaml.m4 b/deployment/kubernetes/kafka-svc.yaml.m4 new file mode 100644 index 0000000..6adfca0 --- /dev/null +++ b/deployment/kubernetes/kafka-svc.yaml.m4 @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: kafka-service + labels: + app: kafka +spec: + ports: + - port: 9092 + protocol: TCP + selector: + app: kafka diff --git a/deployment/kubernetes/live-deploy.yaml.m4 b/deployment/kubernetes/live-deploy.yaml.m4 new file mode 100644 index 0000000..981cd00 --- /dev/null +++ b/deployment/kubernetes/live-deploy.yaml.m4 @@ -0,0 +1,48 @@ +include(platform.m4) +apiVersion: apps/v1 +kind: Deployment +metadata: + name: live-defn(`LIVEIDX') + labels: + app: live-defn(`LIVEIDX') +spec: + replicas: 1 + selector: + matchLabels: + app: live-defn(`LIVEIDX') + template: + metadata: + labels: + app: live-defn(`LIVEIDX') + spec: + enableServiceLinks: false + containers: + - name: live-defn(`LIVEIDX') + image: defn(`REGISTRY_PREFIX')ovc_software_transcode_service:latest + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 8 + memory: 6000Mi + requests: + cpu: 4 + memory: 3000Mi + env: + - name: NO_PROXY + value: "*" + - name: no_proxy + value: "*" + volumeMounts: + - mountPath: /var/www/video + name: video-cache + - mountPath: /var/www/archive + name: video-archive + readOnly: true + volumes: + - name: video-cache + persistentVolumeClaim: + claimName: video-cache + - name: video-archive + persistentVolumeClaim: + claimName: video-archive 
+PLATFORM_NODE_SELECTOR(`Xeon')dnl diff --git a/deployment/kubernetes/live-service-deployment.yaml.m4 b/deployment/kubernetes/live-service-deployment.yaml.m4 deleted file mode 100644 index 82739a4..0000000 --- a/deployment/kubernetes/live-service-deployment.yaml.m4 +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: live-service - name: live-service -spec: - selector: - matchLabels: - app: live-service - replicas: 1 - template: - metadata: - creationTimestamp: null - labels: - app: live-service - spec: - containers: - - image: defn(`REGISTRY_PREFIX')ovc_software_transcode_service:latest - imagePullPolicy: IfNotPresent - name: live-service - resources: - limits: - cpu: "6" - memory: 6291456e3 - requests: - cpu: "3" - memory: 3145728e3 - nodeSelector: - kubernetes.io/hostname: master.machine - restartPolicy: Always diff --git a/deployment/kubernetes/mkvolume.sh b/deployment/kubernetes/mkvolume.sh new file mode 100755 index 0000000..77dc93e --- /dev/null +++ b/deployment/kubernetes/mkvolume.sh @@ -0,0 +1,32 @@ +#!/bin/bash -e + +DIR=$(dirname $(readlink -f "$0")) + +echo "Making volumes..." 
+HOSTS=$(kubectl get node -o 'custom-columns=NAME:.status.addresses[?(@.type=="Hostname")].address,IP:.status.addresses[?(@.type=="InternalIP")].address' | awk '!/NAME/{print $1":"$2}') +awk -v DIR="$DIR" -v HOSTS="$HOSTS" ' +BEGIN{ + split(HOSTS,tmp1," "); + for (i in tmp1) { + split(tmp1[i],tmp2,":"); + host2ip[tmp2[1]]=tmp2[2]; + } +} +/name:/ { + gsub("-","/",$2) + content="\""DIR"/../../volume/"$2"\"" +} +/path:/ { + path=$2 +} +/- ".*"/ { + host=host2ip[substr($2,2,length($2)-2)]; + system("ssh "host" \"mkdir -p "path";find "path" -mindepth 1 -maxdepth 1 -exec rm -rf {} \\\\;\""); + if (path == "/tmp/archive/video") { + system("scp -r "content"/* "host":"path); + } +} +END { + system("echo finished...") +} +' "$DIR"/*-pv.yaml diff --git a/deployment/kubernetes/platform.m4 b/deployment/kubernetes/platform.m4 new file mode 100644 index 0000000..74dad55 --- /dev/null +++ b/deployment/kubernetes/platform.m4 @@ -0,0 +1,16 @@ +define(`PLATFORM_SUFFIX',translit(defn(`PLATFORM'),`A-Z',`a-z'))dnl +define(`PLATFORM_RESOURCES',dnl +ifelse(defn(`PLATFORM'),`XeonE3',dnl +# gpu.intel.com/i915: 1 +))dnl +define(`PLATFORM_NODE_SELECTOR',dnl + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "xeone3-zone" + operator: `ifelse(defn(`PLATFORM'),`XeonE3',ifelse($1,`XeonE3',`In',`NotIn'),`NotIn')' + values: + - "xeone3" +)dnl diff --git a/deployment/kubernetes/redis-deploy.yaml.m4 b/deployment/kubernetes/redis-deploy.yaml.m4 new file mode 100644 index 0000000..74f8004 --- /dev/null +++ b/deployment/kubernetes/redis-deploy.yaml.m4 @@ -0,0 +1,33 @@ +include(platform.m4) +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis + labels: + app: redis +spec: + replicas: 1 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + spec: + containers: + - name: redis + image: redis:latest + imagePullPolicy: IfNotPresent + command: + - redis-server + ports: + - 
containerPort: 6379 + resources: + requests: + cpu: 1 + memory: 500Mi + limits: + cpu: 2 + memory: 1000Mi +PLATFORM_NODE_SELECTOR(`Xeon')dnl diff --git a/deployment/kubernetes/redis-service-deployment.yaml.m4 b/deployment/kubernetes/redis-service-deployment.yaml.m4 deleted file mode 100644 index 0425eb7..0000000 --- a/deployment/kubernetes/redis-service-deployment.yaml.m4 +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: redis-service - name: redis-service -spec: - selector: - matchLabels: - app: redis-service - replicas: 1 - template: - metadata: - creationTimestamp: null - labels: - app: redis-service - spec: - containers: - - args: - - redis-server - image: redis:latest - name: redis-service - ports: - - containerPort: 6379 - resources: - limits: - cpu: "2" - memory: 1048576e3 - requests: - cpu: "1" - memory: 524288e3 - nodeSelector: - kubernetes.io/hostname: master.machine - restartPolicy: Always diff --git a/deployment/kubernetes/redis-service-service.yaml.m4 b/deployment/kubernetes/redis-service-service.yaml.m4 deleted file mode 100644 index 22bd6ad..0000000 --- a/deployment/kubernetes/redis-service-service.yaml.m4 +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - app: redis-service - name: redis-service -spec: - ports: - - name: "6379" - port: 6379 - targetPort: 6379 - selector: - app: redis-service diff --git a/deployment/kubernetes/redis-svc.yaml.m4 b/deployment/kubernetes/redis-svc.yaml.m4 new file mode 100644 index 0000000..2677ba9 --- /dev/null +++ b/deployment/kubernetes/redis-svc.yaml.m4 @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: redis-service + labels: + app: redis +spec: + ports: + - port: 6379 + protocol: TCP + selector: + app: redis diff --git a/deployment/kubernetes/run_with_GUI.py b/deployment/kubernetes/run_with_GUI.py deleted file mode 100755 index 5ab994a..0000000 --- a/deployment/kubernetes/run_with_GUI.py +++ /dev/null @@ -1,10 +0,0 @@ 
-#!/usr/bin/python3 - -import wx -from frame_index import MyFrame_Index - -if __name__ == '__main__': - app = wx.App() - dlg = MyFrame_Index(None) - dlg.Show(True) - app.MainLoop() diff --git a/deployment/kubernetes/run_with_command.py b/deployment/kubernetes/run_with_command.py index ec9927d..4478dca 100755 --- a/deployment/kubernetes/run_with_command.py +++ b/deployment/kubernetes/run_with_command.py @@ -2,202 +2,44 @@ import yaml_utils import update_yaml -import os import re -import copy -import subprocess import sys -import socket -import functools +import configparser sys.path.append(sys.argv[1]) -def ping(host): - cmd = 'ping -c %d %s' % (1, host) - p = subprocess.Popen(args=cmd, shell=True, - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - out = p.stdout.read().decode() - - reg_receive = "(\d+) received" - match_receive = re.search(reg_receive, out) - receive_count = -1 - - if match_receive: - receive_count = int(match_receive.group().split(' ')[0]) - if receive_count > 0: - return True - else: - return False - -def get_volume_directory(nfs_server, is_localhost): - video_list = [] - - if is_localhost: - volume_directory = os.path.dirname(os.path.dirname(sys.argv[1])) - - for i in os.listdir(os.path.join(volume_directory, "volume/video/archive")): - if os.path.splitext(i)[1] == '.mp4': - video_list.append(i) - return volume_directory, video_list - - volume_directory = input( - "Please input CDN-Transcode-Sample volume directory path on NFS server: ") - while True: - if not os.path.isabs(volume_directory): - volume_directory = input( - "Input error, please input CDN-Transcode-Sample volume directory path on NFS server again: ") - else: - if re.match(".+/$", volume_directory): - volume_directory = volume_directory[:-1] - break - - username = input("Please input NFS server username: ") - while True: - if re.match(r"[^\s]+$", username): - exec_cmd = os.popen("fab -u %s -H %s -- 'ls %s'" % (username, nfs_server, - os.path.join(volume_directory, 
"volume/video/archive"))) - result = [re.findall(r'[^\\\s/:\*\?"<>\|]+', i) - for i in re.findall(r'out:(.+)\n', exec_cmd.read())] - video_list = [i for i in functools.reduce( - lambda x, y:x+y, result) if os.path.splitext(i)[1] == '.mp4'] - break - else: - username = input( - "Input error, please input NFS server username again: ") - return volume_directory, video_list - -def configure_basic_module(node_num): - if node_num > 1: - nfs_server = input( - "Please input where the video clips server is ([NFS server IP address]): ") - while True: - if re.match("((25[0-5]|2[0-4]\d|((1\d{2})|([1-9]?\d)))\.){3}(25[0-5]|2[0-4]\d|((1\d{2})|([1-9]?\d)))$", nfs_server): - if not ping(nfs_server): - nfs_server = input( - "Can't ping your NFS server ip address, please input where the video clips server is again ([NFS server IP address]): ") - continue - volume_directory, video_list = get_volume_directory( - nfs_server, False) - break - else: - nfs_server = input( - "Input error, please input where the video clips server is again ([NFS server IP address]): ") - else: - nfs_server = input( - "Please input where the video clips server is ([localhost] or [NFS server IP address]): ") - while True: - if nfs_server == "localhost": - volume_directory, video_list = get_volume_directory( - nfs_server, True) - break - elif re.match("((25[0-5]|2[0-4]\d|((1\d{2})|([1-9]?\d)))\.){3}(25[0-5]|2[0-4]\d|((1\d{2})|([1-9]?\d)))", nfs_server): - if not ping(nfs_server): - nfs_server = input( - "Can't ping your NFS server IP address, please input where the video clips server is again ([localhost] or [NFS server IP address]): ") - continue - volume_directory, video_list = get_volume_directory( - nfs_server, False) - break - else: - nfs_server = input( - "Input error, please input where the video clips server is again ([localhost] or [NFS server IP address]): ") - if not video_list: - print("\033[0;31;40mNo video clips were found!!!\033[0m") - os._exit(1) - return nfs_server, volume_directory, 
video_list - -def input_node_name(service_name, pods_dict, image_name="sw"): - node_name_list = sw_node_name_list - - if image_name == "hw": - node_name_list = hw_node_name_list - - if node_num == 1: - node_name = node_name_list[0] - else: - node_name = input("Please input run " + service_name + - " node name (" + str(node_name_list)[1:-1] + "): ") - while True: - if node_name == "": - node_name = node_name_list[0] - if node_name in node_name_list: - break - else: - node_name = input("Input error, please input run " + service_name + - " node name again (" + str(node_name_list)[1:-1] + "): ") - - if image_name == "hw": - hw_node_name_list.remove(node_name) - - pods_dict[service_name]["node"] = node_name - return pods_dict - -def input_request_cpu(service_name, node_dict, pods_dict, cpu_quota): - if re.match(r"\d{1,2}(\.\d+)?$", cpu_quota) and node_dict[pods_dict[service_name]["node"]]["cpu"] > float(cpu_quota) > 0: - node_dict[pods_dict[service_name] - ["node"]]["cpu"] -= float(cpu_quota) - pods_dict[service_name]["cpu"] = float(cpu_quota) - else: - print("Error: Overload! Pleaes redistribute cpu request in cpu_mem_managerment.cfg") - os._exit() - return node_dict, pods_dict - -def input_request_mem(service_name, node_dict, pods_dict, mem_quota): - if re.match(r"\d{3,5}$", mem_quota) and node_dict[pods_dict[service_name]["node"]]["memory"] > int(mem_quota) > 0: - node_dict[pods_dict[service_name]["node"] - ]["memory"] -= int(mem_quota) - pods_dict[service_name]["memory"] = int(mem_quota) - else: - print("Error: Overload! 
Pleaes redistribute memory request in cpu_mem_managerment.cfg") - os._exit() - return node_dict, pods_dict - -def configure_live_transcode_args(service_name, num, trans_cfg_dict, image_name): +def configure_live_transcode_args(service_name, trans_cfg_dict): pods_dict[service_name]["input"] = trans_cfg_dict[service_name]['url'] for trans_num in range(int(trans_cfg_dict[service_name]['density'])): pods_dict[service_name]["transcode" + str(trans_num)] = { - 'codec': trans_cfg_dict[service_name]['encoder_type'], 'protocol': trans_cfg_dict[service_name]['protocol'], 'resolution': trans_cfg_dict[service_name]['width_height'], 'bitrate': trans_cfg_dict[service_name]['bitrate'], 'framerate':trans_cfg_dict[service_name]['framerate'], 'gop': trans_cfg_dict[service_name]['gop'], 'maxbFrames': trans_cfg_dict[service_name]['maxbframes'], 'refsNum': trans_cfg_dict[service_name]['refsnum'], 'preset': trans_cfg_dict[service_name]['preset'], 'output': 'output_name'} + 'codec': trans_cfg_dict[service_name]['encodetype'], 'protocol': trans_cfg_dict[service_name]['protocol'], 'resolution': trans_cfg_dict[service_name]['width'] + "x" + trans_cfg_dict[service_name]['height'], 'bitrate': trans_cfg_dict[service_name]['bitrate'], 'framerate':trans_cfg_dict[service_name]['framerate'], 'gop': trans_cfg_dict[service_name]['gop'], 'maxbFrames': trans_cfg_dict[service_name]['maxbframes'], 'refsNum': trans_cfg_dict[service_name]['refsnum'], 'preset': trans_cfg_dict[service_name]['preset'], 'output': 'output_name'} return -def configure_transcode_service(service_name, num, trans_cfg_dict): - global hw_node_num - +def configure_live_transcode_service(num, trans_cfg_dict): for i in range(int(num)): - service_name_index = re.search( - "((vod)|(live))(\d*)", service_name).group(1) + str(i) + service_name_index = "live-" + str(i) pods.append(service_name_index) pods_dict[service_name_index] = {} - if hw_node_num > 0: - if trans_cfg_dict[service_name_index]['hwaccel'] == 'true': - image_name = "hw" - 
elif trans_cfg_dict[service_name_index]['hwaccel'] == 'false': - image_name = "sw" - while True: - if image_name.lower() == "sw" or image_name.lower() == "hw": - hw_node_num -= 1 if image_name.lower() == "hw" else 0 - break - else: - image_name = input("Input error, please choose the transcode mode of the " + str(i) + "th" + service_name + - " again ([hw]: hardware is for E3/VCA2 or [sw]: software is for E5): ") - else: - image_name = "sw" - pods_dict[service_name_index]["mode"] = image_name - if re.search("live\d+", service_name_index): - configure_live_transcode_args( - service_name_index, num, trans_cfg_dict, image_name.lower()) + configure_live_transcode_args( + service_name_index, trans_cfg_dict) -def get_node_information(description): - node_dict={} - for line in description.split("\n"): - fields=line.split() - if fields[2].endswith("Ki"): memory=int(fields[2][:-2])/1024 - if fields[2].endswith("Mi"): memory=int(fields[2][:-2]) - if fields[2].endswith("Gi"): memory=int(fields[2][:-2])*1024 - node_dict[fields[0]]={ "cpu": int(fields[1]), "memory": int(memory) } - return node_dict +def get_request_cpu(service_name, pods_dict, cpu_quota): + if re.match(r"\d{1,2}(\.\d+)?$", cpu_quota): + pods_dict[service_name]["cpu"] = float(cpu_quota) + else: + print("Error: Pleaes redistribute CPU request in cpu_mem_management.cfg") + os._exit() + return pods_dict + +def get_request_mem(service_name, pods_dict, mem_quota): + if re.match(r"\d{3,5}[MKG]i", mem_quota): + pods_dict[service_name]["memory"] = str(mem_quota) + else: + print("Error: Pleaes redistribute memory request in cpu_mem_management.cfg") + os._exit() + return pods_dict def get_config(config_file): - import configparser config = configparser.ConfigParser() config.read(config_file) config_dict = dict(config._sections) @@ -205,37 +47,30 @@ def get_config(config_file): config_dict[k] = dict(config_dict[k]) return config_dict -sw_node_name_list = sys.argv[4].split("\n") -node_num=len(sw_node_name_list) 
-sw_node_name_list = list(filter(None, sw_node_name_list)) -hw_node_name_list = copy.deepcopy(sw_node_name_list) -hw_node_num = len(hw_node_name_list) -nfs_server, volume_directory, video_list = configure_basic_module(node_num) - -pods_dict = {"cdn": {}, "redis": {}, "zookeeper": {}, "kafka": {}} -node_dict = get_node_information(sys.argv[5]) -pods = ["cdn", "redis", "zookeeper", "kafka"] - DIRS = sys.argv[1] NVODS = sys.argv[2] NLIVES = sys.argv[3] -NNODES = sys.argv[4] -NODE_DESCRIPTION = sys.argv[5] -live_transcode_cfg = DIRS + '/live-transcode.cfg' -vod_transcode_cfg = DIRS + '/vod-transcode.cfg' -cpu_mem_cfg = DIRS + '/cpu_mem_managerment.cfg' -live_trans_cfg_dict = get_config(live_transcode_cfg) -vod_trans_cfg_dict = get_config(vod_transcode_cfg) +pods_dict = {"cdn": {}, "redis": {}, "zookeeper": {}, "kafka": {}} +pods = ["cdn", "redis", "zookeeper", "kafka"] + +cpu_mem_cfg = DIRS + '/cpu_mem_management.cfg' cpu_mem_cfg_dict = get_config(cpu_mem_cfg) -trans_cfg_dict = {**live_trans_cfg_dict, **vod_trans_cfg_dict} +live_transcode_cfg = {} +live_trans_cfg_dict = {} + +if int(NVODS) > 0: + pods.append("vod") + pods_dict["vod"] = {} -configure_transcode_service("vod", NVODS, vod_trans_cfg_dict) -configure_transcode_service("live", NLIVES, live_trans_cfg_dict) +if int(NLIVES) > 0: + live_transcode_cfg = DIRS + '/live-transcode.cfg' + live_trans_cfg_dict = get_config(live_transcode_cfg) + configure_live_transcode_service(NLIVES, live_trans_cfg_dict) for pod in pods: - pods_dict = input_node_name(pod, pods_dict) - node_dict, pods_dict = input_request_cpu(pod, node_dict, pods_dict, cpu_mem_cfg_dict[pod]['cpu']) - node_dict, pods_dict = input_request_mem(pod, node_dict, pods_dict, cpu_mem_cfg_dict[pod]['mem']) + print(pod); + pods_dict = get_request_cpu(pod, pods_dict, cpu_mem_cfg_dict[pod]['cpu']) + pods_dict = get_request_mem(pod, pods_dict, cpu_mem_cfg_dict[pod]['mem']) -update_yaml.update_yaml(nfs_server, volume_directory, sys.argv[1], pods, pods_dict, 
get_node_information(),trans_cfg_dict) +update_yaml.update_yaml(sys.argv[1], pods, pods_dict, live_trans_cfg_dict) diff --git a/deployment/kubernetes/start.sh b/deployment/kubernetes/start.sh index 90af793..2425d2d 100755 --- a/deployment/kubernetes/start.sh +++ b/deployment/kubernetes/start.sh @@ -30,20 +30,40 @@ fi try_command hash kubectl > /dev/null -for i in $(find "$DIR" -maxdepth 1 -name "*service.yaml"); do +for i in $(find "$DIR" -maxdepth 1 -name "*svc.yaml"); do len=$(echo $DIR | wc -m) - i1=$(echo ${i:${len}} | sed 's/-service.yaml//') - for j in $(kubectl get svc | awk '{print $1}' | sed -n '2, $p' | grep -v 'kubernetes'); do + i1=$(echo ${i:${len}} | sed 's/-svc.yaml//') + for j in $(kubectl get svc | awk '{print $1}' | sed -n '2, $p' | grep -v 'kubernetes' | awk -F '-' '{print $1}'); do if [ "$i1" == "$j" ]; then kubectl delete -f "$i" fi done done -for i in $(find "$DIR" -maxdepth 1 -name "*deployment.yaml"); do +for i in $(find "$DIR" -maxdepth 1 -name "*deploy.yaml"); do len=$(echo $DIR | wc -m) - i1=$(echo ${i:${len}} | sed 's/-deployment.yaml//') - for j in $(kubectl get pod | awk '{print $1}' | sed -n '2, $p' | awk -F '-' '{$NF=""; $(NF-1)=""; gsub(" ", "");gsub(" ", "-"); print}'); do + i1=$(echo ${i:${len}} | sed 's/-deploy.yaml//') + for j in $(kubectl get pod | awk '{print $1}' | sed -n '2, $p' | awk -F '-' '{$NF=""; $(NF-1)=""; gsub(" ", "");gsub(" ", "-"); print}' | uniq); do + if [ ${i1} == ${j} ]; then + kubectl delete -f "${i}" + fi + done +done + +for i in $(find "$DIR" -maxdepth 1 -name "*pvc.yaml"); do + len=$(echo $DIR | wc -m) + i1=$(echo ${i:${len}} | sed 's/-pvc.yaml//') + for j in $(kubectl get pvc | awk '{print $1}' | sed -n '2, $p'); do + if [ ${i1} == ${j} ]; then + kubectl delete -f "${i}" + fi + done +done + +for i in $(find "$DIR" -maxdepth 1 -name "*pv.yaml"); do + len=$(echo $DIR | wc -m) + i1=$(echo ${i:${len}} | sed 's/-pv.yaml//') + for j in $(kubectl get pv | awk '{print $1}' | sed -n '2, $p'); do if [ ${i1} == ${j} 
]; then kubectl delete -f "${i}" fi diff --git a/deployment/kubernetes/start_GUI.sh b/deployment/kubernetes/start_GUI.sh deleted file mode 100755 index fd46403..0000000 --- a/deployment/kubernetes/start_GUI.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/bash -e - -DIR=$(dirname $(readlink -f "$0")) -export NGINX_LOG_VOLUME=$(readlink -f "/var/log/nginx") - -EXT=*.yaml - -# Set Bash color -ECHO_PREFIX_INFO="\033[1;32;40mINFO...\033[0;0m" -ECHO_PREFIX_ERROR="\033[1;31;40mError...\033[0;0m" - -# Try command for test command result. -function try_command { - "$@" - status=$? - if [ $status -ne 0 ]; then - echo -e $ECHO_PREFIX_ERROR "ERROR with \"$@\", Return status $status." - exit $status - fi - return $status -} - -function create_secret { - kubectl create secret generic self-signed-certificate "--from-file=${DIR}/../certificate/self.crt" "--from-file=${DIR}/../certificate/self.key" -} - -# This script must be run as root -if [[ $EUID -ne 0 ]]; then - echo -e $ECHO_PREFIX_ERROR "This script must be run as root!" 
1>&2 - exit 1 -fi - -try_command hash kubectl > /dev/null - -for i in $(find "$DIR" -maxdepth 1 -name "*service.yaml"); do - len=$(echo $DIR | wc -m) - i1=$(echo ${i:${len}} | sed 's/-service.yaml//') - for j in $(kubectl get svc | awk '{print $1}' | sed -n '2, $p' | grep -v 'kubernetes'); do - if [ "$i1" == "$j" ]; then - kubectl delete -f "$i" - fi - done -done - -for i in $(find "$DIR" -maxdepth 1 -name "*deployment.yaml"); do - len=$(echo $DIR | wc -m) - i1=$(echo ${i:${len}} | sed 's/-deployment.yaml//') - for j in $(kubectl get pod | awk '{print $1}' | sed -n '2, $p' | awk -F '-' '{$NF=""; $(NF-1)=""; gsub(" ", "");gsub(" ", "-"); print}'); do - if [ ${i1} == ${j} ]; then - kubectl delete -f "${i}" - fi - done -done - -for i in $(find "$DIR" -maxdepth 1 -name "*certificates.yaml"); do - len=$(echo $DIR | wc -m) - i1=$(echo ${i:${len}} | sed 's/.yaml//') - for j in $(kubectl get secret | awk '{print $1}' | sed -n '2, $p' | grep -v 'default-token'); do - if [ ${i1} == ${j} ]; then - kubectl delete -f "${i}" - fi - done -done - -rm -rf $DIR/$EXT - -sudo mkdir -p "${NGINX_LOG_VOLUME}" - -yml="$DIR/docker-compose-template.yml" -test -f "$yml" - -dcv="$(kompose version | cut -f1 -d' ')" -mdcv="$(printf '%s\n' $dcv 1.16 | sort -r -V | head -n 1)" -if test "$mdcv" = "1.16"; then - echo "" - echo "kompose >=1.16 is required." - echo "Please upgrade kompose at https://docs.docker.com/compose/install." 
- echo "" - exit 0 -fi - -try_command kompose convert -f "$yml" -o "$DIR" - -"$DIR/run_with_GUI.py" "$DIR" - -"$DIR/../certificate/self-sign.sh" -create_secret 2>/dev/null || (kubectl delete secret self-signed-certificate; create_secret) - -for i in $(find "$DIR" -maxdepth 1 -name "*.yaml"); do - kubectl apply -f "$i" -done diff --git a/deployment/kubernetes/stop.sh b/deployment/kubernetes/stop.sh index 5c507bf..543196b 100755 --- a/deployment/kubernetes/stop.sh +++ b/deployment/kubernetes/stop.sh @@ -26,20 +26,40 @@ fi try_command hash kubectl > /dev/null -for i in $(find "$DIR" -maxdepth 1 -name "*service.yaml"); do +for i in $(find "$DIR" -maxdepth 1 -name "*svc.yaml"); do len=$(echo $DIR | wc -m) - i1=$(echo ${i:${len}} | sed 's/-service.yaml//') - for j in $(kubectl get svc | awk '{print $1}' | sed -n '2, $p' | grep -v 'kubernetes'); do + i1=$(echo ${i:${len}} | sed 's/-svc.yaml//') + for j in $(kubectl get svc | awk '{print $1}' | sed -n '2, $p' | grep -v 'kubernetes' | awk -F '-' '{print $1}'); do if [ "$i1" == "$j" ]; then kubectl delete -f "$i" fi done done -for i in $(find "$DIR" -maxdepth 1 -name "*deployment.yaml"); do +for i in $(find "$DIR" -maxdepth 1 -name "*deploy.yaml"); do len=$(echo $DIR | wc -m) - i1=$(echo ${i:${len}} | sed 's/-deployment.yaml//') - for j in $(kubectl get pod | awk '{print $1}' | sed -n '2, $p' | awk -F '-' '{$NF=""; $(NF-1)=""; gsub(" ", "");gsub(" ", "-"); print}'); do + i1=$(echo ${i:${len}} | sed 's/-deploy.yaml//') + for j in $(kubectl get pod | awk '{print $1}' | sed -n '2, $p' | awk -F '-' '{$NF=""; $(NF-1)=""; gsub(" ", "");gsub(" ", "-"); print}' | uniq); do + if [ ${i1} == ${j} ]; then + kubectl delete -f "${i}" + fi + done +done + +for i in $(find "$DIR" -maxdepth 1 -name "*pvc.yaml"); do + len=$(echo $DIR | wc -m) + i1=$(echo ${i:${len}} | sed 's/-pvc.yaml//') + for j in $(kubectl get pvc | awk '{print $1}' | sed -n '2, $p'); do + if [ ${i1} == ${j} ]; then + kubectl delete -f "${i}" + fi + done +done + +for i in 
$(find "$DIR" -maxdepth 1 -name "*pv.yaml"); do + len=$(echo $DIR | wc -m) + i1=$(echo ${i:${len}} | sed 's/-pv.yaml//') + for j in $(kubectl get pv | awk '{print $1}' | sed -n '2, $p'); do if [ ${i1} == ${j} ]; then kubectl delete -f "${i}" fi diff --git a/deployment/kubernetes/update_yaml.py b/deployment/kubernetes/update_yaml.py index c203fec..498b2ef 100755 --- a/deployment/kubernetes/update_yaml.py +++ b/deployment/kubernetes/update_yaml.py @@ -2,12 +2,10 @@ import os import re -import copy -import subprocess import sys import socket import functools - +import yaml_utils def get_host_ip(): try: @@ -18,61 +16,36 @@ def get_host_ip(): s.close() return host_ip - -def update_yaml(nfs_server, volume_directory, dir_path, pods, pods_dict, node_dict,trans_cfg_dict): +def update_yaml(dir_path, pods, pods_dict,trans_cfg_dict): host_ip = get_host_ip() sys.path.append(dir_path) - import yaml_utils + if re.search("live\d+", str(pods)): print("\n\033[0;31;40mThe live video playlist URL are below:\033[0m") for pod in pods: - node_name = pods_dict[pod]["node"] - node = node_dict[node_name] - limit_cpu = 2 * float(pods_dict[pod]["cpu"]) if 2 * float( - pods_dict[pod]["cpu"]) < node["cpu"] else node["cpu"] - 1 - limit_memory = 2 * int(pods_dict[pod]["memory"]) if 2 * int( - pods_dict[pod]["memory"]) < node["memory"] else node["memory"] - 1 + limit_cpu = 2 * float(pods_dict[pod]["cpu"]) + limit_memory = str(2 * int(str(pods_dict[pod]["memory"])[0:-2])) + str(pods_dict[pod]["memory"])[-2:] - yaml_file = os.path.join(dir_path, re.match( - "([A-Za-z]+)\d*$", pod).group(1) + "-service-deployment.yaml") + yaml_file = os.path.join(dir_path, re.findall( + "([A-Za-z]+-*\d*$)", pod)[0] + "-deploy.yaml") data = yaml_utils.load_yaml_file(yaml_file) data = yaml_utils.update_resource_quotas( data, pods_dict[pod]["cpu"], limit_cpu, pods_dict[pod]["memory"], limit_memory) - data = yaml_utils.update_nodeSelector(data, node_name) - - if pod == "cdn": - data = yaml_utils.add_volumeMounts(data, 
True) - data = yaml_utils.add_volumes( - data, nfs_server, True, volume_directory) - node_port = 443 - service_file = dir_path + "/cdn-service-service.yaml" - service_data = yaml_utils.load_yaml_file(service_file) - service_data = yaml_utils.set_nodePort(service_data, node_port) - yaml_utils.dump_yaml_file(service_data, service_file) - - if re.search("((vod)|(live))\d+", pod): - data = yaml_utils.update_imageName( - data, pods_dict[pod]["mode"].lower(), True if re.search("vod\d+", pod) else False) - data = yaml_utils.add_volumeMounts(data, False) - data = yaml_utils.add_volumes( - data, nfs_server, False, volume_directory) - data = yaml_utils.update_service_name( - data, pod + "-service") - yaml_file = os.path.join(dir_path, pod + "-service-deployment.yaml") - - if re.search("live\d", pod): + if re.search("((vod)|(live-))\d+", pod): + yaml_file = os.path.join(dir_path, pod + "-deploy.yaml") + if re.search("live-\d", pod): live_args = { 'input_video': pods_dict[pod]["input"], "output_dict": {}} if trans_cfg_dict[pod]['hwaccel'] == 'false': if trans_cfg_dict[pod]['protocol'] == 'DASH': - if trans_cfg_dict[pod]['encoder_type'] == 'AVC' or trans_cfg_dict[pod]['encoder_type'] == 'HEVC' or trans_cfg_dict[pod]['encoder_type'] == 'AV1': + if trans_cfg_dict[pod]['encodetype'] == 'AVC' or trans_cfg_dict[pod]['encodetype'] == 'HEVC' or trans_cfg_dict[pod]['encodetype'] == 'AV1': codec_dict = {"AVC": "libx264", "HEVC": "libsvt_hevc", "AV1": "libsvt_av1"} else: print("Error: Only support AVC/HEVC/AV1! Please input correct encoder_type in transcode.cfg (" + pod + ")") os._exit() elif trans_cfg_dict[pod]['protocol'] == 'HLS': - if trans_cfg_dict[pod]['encoder_type'] == 'AVC' or trans_cfg_dict[pod]['encoder_type'] == 'HEVC': + if trans_cfg_dict[pod]['encodetype'] == 'AVC' or trans_cfg_dict[pod]['encodetype'] == 'HEVC': codec_dict = {"AVC": "libx264", "HEVC": "libsvt_hevc"} else: print("Error: Only support AVC/HEVC! 
Please input correct encoder_type in transcode.cfg (" + pod + ")") @@ -81,7 +54,7 @@ def update_yaml(nfs_server, volume_directory, dir_path, pods, pods_dict, node_di print("Error: Please input correct protocol(HLS/DASH) in transcode.cfg (" + pod + ")") os._exit() elif trans_cfg_dict[pod]['hwaccel'] == 'true': - if trans_cfg_dict[pod]['encoder_type'] == 'AVC' or trans_cfg_dict[pod]['encoder_type'] == 'HEVC': + if trans_cfg_dict[pod]['encodetype'] == 'AVC' or trans_cfg_dict[pod]['encodetype'] == 'HEVC': codec_dict = {"AVC": "h264_vaapi", "HEVC": "hevc_vaapi"} else: print("Error: Only support AVC/HEVC! Please input correct encoder_type in transcode.cfg (" + pod + ")") @@ -89,14 +62,11 @@ def update_yaml(nfs_server, volume_directory, dir_path, pods, pods_dict, node_di for num in range(4): if pods_dict[pod].get("transcode" + str(num), None) and pods_dict[pod]["transcode" + str(num)].get("protocol", None) and pods_dict[pod]["transcode" + str(num)].get("resolution", None) and pods_dict[pod]["transcode" + str(num)].get("bitrate", None) and pods_dict[pod]["transcode" + str(num)].get("codec", None) and pods_dict[pod]["transcode" + str(num)].get("output", None): - live_args["output_dict"][pods_dict[pod]["transcode" + str(num)]["output"] + "_" + re.search("live(\d+)", pod).group(1) + "_" + str(num)] = [pods_dict[pod]["transcode" + str(num)]["protocol"].lower(), pods_dict[pod]["transcode" + str(num)]["resolution"], pods_dict[pod]["transcode" + str(num)]["bitrate"], codec_dict[pods_dict[pod]["transcode" + str(num)]["codec"]], pods_dict[pod]["transcode" + str(num)]["framerate"], pods_dict[pod]["transcode" + str(num)]["gop"], pods_dict[pod]["transcode" + str(num)]["maxbFrames"], pods_dict[pod]["transcode" + str(num)]["refsNum"], pods_dict[pod]["transcode" + str(num)]["preset"]] + live_args["output_dict"][pods_dict[pod]["transcode" + str(num)]["output"] + "_" + re.search("live-(\d+)", pod).group(1) + "_" + str(num)] = [pods_dict[pod]["transcode" + str(num)]["protocol"].lower(), 
pods_dict[pod]["transcode" + str(num)]["resolution"], pods_dict[pod]["transcode" + str(num)]["bitrate"], codec_dict[pods_dict[pod]["transcode" + str(num)]["codec"]], pods_dict[pod]["transcode" + str(num)]["framerate"], pods_dict[pod]["transcode" + str(num)]["gop"], pods_dict[pod]["transcode" + str(num)]["maxbFrames"], pods_dict[pod]["transcode" + str(num)]["refsNum"], pods_dict[pod]["transcode" + str(num)]["preset"]] print("\033[0;31;40mhttps://%s/%s/%s/index.%s\033[0m" % (host_ip, pods_dict[pod]["transcode" + str(num)]["protocol"].lower(), pods_dict[pod]["transcode" + str(num)] - ["output"] + "_" + re.search("live(\d+)", pod).group(1) + "_" + str(num), "m3u8" if pods_dict[pod]["transcode" + str(num)]["protocol"].lower() == "hls" else "mpd")) + ["output"] + "_" + re.search("live-(\d+)", pod).group(1) + "_" + str(num), "m3u8" if pods_dict[pod]["transcode" + str(num)]["protocol"].lower() == "hls" else "mpd")) data = yaml_utils.update_command( - data, pods_dict[pod]["mode"].lower(), live_args) + data, trans_cfg_dict[pod]['hwaccel'], live_args) yaml_utils.dump_yaml_file(data, yaml_file) - - subprocess.call("rm -rf %s/vod-service-deployment.yaml" % dir_path, shell=True) - subprocess.call("rm -rf %s/live-service-deployment.yaml" % dir_path, shell=True) diff --git a/deployment/kubernetes/video-archive-pv.yaml.m4 b/deployment/kubernetes/video-archive-pv.yaml.m4 new file mode 100644 index 0000000..8557f8a --- /dev/null +++ b/deployment/kubernetes/video-archive-pv.yaml.m4 @@ -0,0 +1,32 @@ + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: video-archive +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer + +--- + +apiVersion: v1 +kind: PersistentVolume +metadata: + name: video-archive +spec: + capacity: + storage: defn(`VIDEO_ARCHIVE_VOLUME_SIZE')Gi + accessModes: + - ReadOnlyMany + persistentVolumeReclaimPolicy: Retain + storageClassName: video-archive + local: + path: defn(`VIDEO_ARCHIVE_VOLUME_PATH') + nodeAffinity: + 
required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - "defn(`VIDEO_ARCHIVE_VOLUME_HOST')" + diff --git a/deployment/kubernetes/video-archive-pvc.yaml.m4 b/deployment/kubernetes/video-archive-pvc.yaml.m4 new file mode 100644 index 0000000..f18483f --- /dev/null +++ b/deployment/kubernetes/video-archive-pvc.yaml.m4 @@ -0,0 +1,12 @@ + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: video-archive +spec: + accessModes: + - ReadOnlyMany + storageClassName: video-archive + resources: + requests: + storage: defn(`VIDEO_ARCHIVE_VOLUME_SIZE')Gi diff --git a/deployment/kubernetes/video-cache-pv.yaml.m4 b/deployment/kubernetes/video-cache-pv.yaml.m4 new file mode 100644 index 0000000..524ae1b --- /dev/null +++ b/deployment/kubernetes/video-cache-pv.yaml.m4 @@ -0,0 +1,32 @@ + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: video-cache +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer + +--- + +apiVersion: v1 +kind: PersistentVolume +metadata: + name: video-cache +spec: + capacity: + storage: defn(`VIDEO_CACHE_VOLUME_SIZE')Gi + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Retain + storageClassName: video-cache + local: + path: defn(`VIDEO_CACHE_VOLUME_PATH') + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - "defn(`VIDEO_CACHE_VOLUME_HOST')" + diff --git a/deployment/kubernetes/video-cache-pvc.yaml.m4 b/deployment/kubernetes/video-cache-pvc.yaml.m4 new file mode 100644 index 0000000..1657bc0 --- /dev/null +++ b/deployment/kubernetes/video-cache-pvc.yaml.m4 @@ -0,0 +1,12 @@ + +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: video-cache +spec: + accessModes: + - ReadWriteMany + storageClassName: video-cache + resources: + requests: + storage: defn(`VIDEO_CACHE_VOLUME_SIZE')Gi diff --git a/deployment/kubernetes/vod-deploy.yaml.m4 
b/deployment/kubernetes/vod-deploy.yaml.m4 new file mode 100644 index 0000000..7977518 --- /dev/null +++ b/deployment/kubernetes/vod-deploy.yaml.m4 @@ -0,0 +1,48 @@ +include(platform.m4) +apiVersion: apps/v1 +kind: Deployment +metadata: + name: vod + labels: + app: vod +spec: + replicas: defn(`NVODS') + selector: + matchLabels: + app: vod + template: + metadata: + labels: + app: vod + spec: + enableServiceLinks: false + containers: + - name: vod + image: defn(`REGISTRY_PREFIX')ovc_software_transcode_service:latest + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 6 + memory: 6000Mi + requests: + cpu: 3 + memory: 3000Mi + env: + - name: NO_PROXY + value: "*" + - name: no_proxy + value: "*" + volumeMounts: + - mountPath: /var/www/video + name: video-cache + - mountPath: /var/www/archive + name: video-archive + readOnly: true + volumes: + - name: video-cache + persistentVolumeClaim: + claimName: video-cache + - name: video-archive + persistentVolumeClaim: + claimName: video-archive +PLATFORM_NODE_SELECTOR(`Xeon')dnl diff --git a/deployment/kubernetes/vod-service-deployment.yaml.m4 b/deployment/kubernetes/vod-service-deployment.yaml.m4 deleted file mode 100644 index 8a416fa..0000000 --- a/deployment/kubernetes/vod-service-deployment.yaml.m4 +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: vod-service - name: vod-service -spec: - selector: - matchLabels: - app: vod-service - replicas: 1 - template: - metadata: - creationTimestamp: null - labels: - app: vod-service - spec: - containers: - - args: - - bash - - -c - - /home/main.py - image: defn(`REGISTRY_PREFIX')ovc_software_transcode_service:latest - imagePullPolicy: IfNotPresent - name: vod-service - resources: - limits: - cpu: "6" - memory: 6291456e3 - requests: - cpu: "3" - memory: 3145728e3 - nodeSelector: - kubernetes.io/hostname: master.machine - restartPolicy: Always diff --git a/deployment/kubernetes/yaml_utils.py b/deployment/kubernetes/yaml_utils.py 
index eb28753..bb91f47 100755 --- a/deployment/kubernetes/yaml_utils.py +++ b/deployment/kubernetes/yaml_utils.py @@ -16,125 +16,31 @@ def dump_yaml_file(data, fileName): default_flow_style=False, allow_unicode=True) -def update_service_name(data, service_name): - data["metadata"]["name"] = service_name - data["spec"]["template"]["metadata"]["labels"]["app"] = service_name - data["metadata"]["labels"]["app"] = service_name - data["spec"]["template"]["spec"]["containers"][0]["name"] = service_name - data["spec"]["selector"]["matchLabels"]["app"] = service_name - return data - -def update_command(data, imageName, live_args): +def update_command(data, bisHW, live_args): data['spec']['template']['spec']['containers'][0]['lifecycle'] = { 'preStop': { 'exec': {'command': [ 'rm', 'rf' ] } } } - scale_dict = {'sw': 'scale', 'hw': 'scale_vaapi'} - if imageName == "hw": + scale_dict = {'false': 'scale', 'true': 'scale_vaapi'} + if bisHW == "true": command = 'ffmpeg -re -stream_loop -1 -hwaccel vaapi -hwaccel_device /dev/dri/renderD128 -hwaccel_output_format vaapi -i /var/www/archive/' + live_args['input_video'] else: command = 'ffmpeg -re -stream_loop -1 -i /var/www/archive/' + live_args['input_video'] for key, value in live_args['output_dict'].items(): data['spec']['template']['spec']['containers'][0]['lifecycle']['preStop']['exec']['command'].append( " /var/www/" + value[0] + '/' + key ) thread = " -thread_count 96" if value[3].find('libsvt') != -1 else "" - command += ' -vf ' + scale_dict[imageName] + '=' + value[1] + ' -c:v ' + value[3] + ' -b:v ' + value[2] + ' -r ' + value[4] + ' -g ' + value[5] + ' -bf ' + value[6] + ' -refs ' + value[7] + ' -preset ' + value[8] + ' -forced-idr 1' + thread + ' -an -f flv rtmp://cdn-service/' + value[0] + '/' + key + command += ' -vf ' + scale_dict[bisHW] + '=' + value[1] + ' -c:v ' + value[3] + ' -b:v ' + value[2] + ' -r ' + value[4] + ' -g ' + value[5] + ' -bf ' + value[6] + ' -refs ' + value[7] + ' -preset ' + value[8] + ' 
-forced-idr 1' + thread + ' -an -f flv rtmp://cdn-service/' + value[0] + '/' + key command_caps = ['bash', '-c', command + ' -abr_pipeline'] data['spec']['template']['spec']['containers'][0].update( {'args': command_caps}) return data -def update_imageName(data, imageName, isVOD): - if imageName == "hw": - data['spec']['template']['spec']['containers'][0]['resources']['limits']['gpu.intel.com/i915'] = 1 - return data - -def update_nodeSelector(data, nodeName): - data['spec']['template']['spec']['nodeSelector']['kubernetes.io/hostname'] = nodeName - return data - -def add_volumeMounts(data, isCDN): - volumemounts_caps = [{'name': 'archive', - 'mountPath': '/var/www/archive', - 'readOnly': False}, - {'name': 'dash', - 'mountPath': '/var/www/dash', - 'readOnly': False}, - {'name': 'hls', - 'mountPath': '/var/www/hls', - 'readOnly': False}] - - if isCDN: - volumemounts_caps += [{'name': 'html', - 'mountPath': '/var/www/html', - 'readOnly': True}, - {'name': 'log', - 'mountPath': '/var/www/log', - 'readOnly': False}, - {'name': 'secrets', - 'mountPath': '/var/run/secrets', - 'readOnly': True}] - data['spec']['template']['spec']['containers'][0].update( - {'volumeMounts': volumemounts_caps}) - return data - -def add_volumes(data, nfs_server, isCDN, volume_directory): - if nfs_server == "localhost": - volumes_caps = [{'name': 'archive', - 'hostPath': - {'path': volume_directory + '/volume/video/archive'}}, - {'name': 'dash', - 'hostPath': - {'path': volume_directory + '/volume/video/dash'}}, - {'name': 'hls', - 'hostPath': - {'path': volume_directory + '/volume/video/hls'}}] - - if isCDN: - volumes_caps += [{'name': 'html', - 'hostPath': - {'path': volume_directory + '/volume/html'}}, - {'name': 'log', - 'hostPath': - {'path': '/var/log/nginx'}}, - {'name': 'secrets', - 'secret': {'secretName': 'self-signed-certificate'}}] - else: - volumes_caps = [{'name': 'archive', - 'nfs': - {'path': volume_directory + '/volume/video/archive', - 'server': nfs_server}}, - {'name': 
'dash', - 'nfs': - {'path': volume_directory + '/volume/video/dash', - 'server': nfs_server}}, - {'name': 'hls', - 'nfs': - {'path': volume_directory + '/volume/video/hls', - 'server': nfs_server}}] - - if isCDN: - volumes_caps += [{'name': 'html', - 'nfs': - {'path': volume_directory + '/volume/html', - 'server': nfs_server}}, - {'name': 'log', - 'hostPath': - {'path': '/var/log/nginx'}}, - {'name': 'secrets', - 'secret': {'secretName': 'self-signed-certificate'}}] - data['spec']['template']['spec'].update({'volumes': volumes_caps}) - return data - -def set_nodePort(data, port): - data['spec']['ports'][0].update({'nodePort': port}) - return data - def update_resource_quotas( data, request_cpu, limit_cpu, request_memory, limit_memory): data["spec"]["template"]["spec"]["containers"][0]["resources"]["requests"] = { "cpu": str(int(float(request_cpu) * 1000)) + "m", - "memory": str(request_memory) + "Mi" + "memory": str(request_memory) } data["spec"]["template"]["spec"]["containers"][0]["resources"]["limits"] = { "cpu": str(int(float(limit_cpu) * 1000)) + "m", - "memory": str(limit_memory) + "Mi" + "memory": str(limit_memory) } return data diff --git a/deployment/kubernetes/zookeeper-deploy.yaml.m4 b/deployment/kubernetes/zookeeper-deploy.yaml.m4 new file mode 100644 index 0000000..fd937eb --- /dev/null +++ b/deployment/kubernetes/zookeeper-deploy.yaml.m4 @@ -0,0 +1,41 @@ +include(platform.m4) +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper + labels: + app: zookeeper +spec: + replicas: 1 + selector: + matchLabels: + app: zookeeper + template: + metadata: + labels: + app: zookeeper + spec: + enableServiceLinks: false + containers: + - name: zookeeper + image: zookeeper:3.5.6 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 2181 + env: + - name: "ZOO_TICK_TIME" + value: "10000" + - name: "ZOO_MAX_CLIENT_CNXNS" + value: "160000" + - name: "ZOO_AUTOPURGE_PURGEINTERVAL" + value: "1" + - name: "ZOO_LOG4J_PROP" + value: "ERROR" + resources: + 
requests: + cpu: 1 + memory: 500Mi + limits: + cpu: 2 + memory: 1000Mi +PLATFORM_NODE_SELECTOR(`Xeon')dnl diff --git a/deployment/kubernetes/zookeeper-service-deployment.yaml.m4 b/deployment/kubernetes/zookeeper-service-deployment.yaml.m4 deleted file mode 100644 index 118f052..0000000 --- a/deployment/kubernetes/zookeeper-service-deployment.yaml.m4 +++ /dev/null @@ -1,47 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: zookeeper-service - name: zookeeper-service -spec: - selector: - matchLabels: - app: zookeeper-service - replicas: 1 - template: - metadata: - creationTimestamp: null - labels: - app: zookeeper-service - spec: - containers: - - env: - - name: ZOOKEEPER_CLIENT_PORT - value: "2181" - - name: ZOOKEEPER_HEAP_OPTS - value: -Xmx2048m -Xms2048m - - name: ZOOKEEPER_LOG4J_LOGGERS - value: zookeepr=ERROR - - name: ZOOKEEPER_LOG4J_ROOT_LOGLEVEL - value: ERROR - - name: ZOOKEEPER_MAX_CLIENT_CNXNS - value: "20000" - - name: ZOOKEEPER_SERVER_ID - value: "1" - - name: ZOOKEEPER_TICK_TIME - value: "2000" - image: zookeeper:latest - name: zookeeper-service - ports: - - containerPort: 2181 - resources: - limits: - cpu: "2" - memory: 1048576e3 - requests: - cpu: "1" - memory: 524288e3 - nodeSelector: - kubernetes.io/hostname: master.machine - restartPolicy: Always diff --git a/deployment/kubernetes/zookeeper-service-service.yaml.m4 b/deployment/kubernetes/zookeeper-service-service.yaml.m4 deleted file mode 100644 index 3bfaaba..0000000 --- a/deployment/kubernetes/zookeeper-service-service.yaml.m4 +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - app: zookeeper-service - name: zookeeper-service -spec: - ports: - - name: "2181" - port: 2181 - targetPort: 2181 - selector: - app: zookeeper-service - type: NodePort diff --git a/deployment/kubernetes/zookeeper-svc.yaml.m4 b/deployment/kubernetes/zookeeper-svc.yaml.m4 new file mode 100644 index 0000000..b89c392 --- /dev/null +++ 
b/deployment/kubernetes/zookeeper-svc.yaml.m4 @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: zookeeper-service + labels: + app: zookeeper +spec: + ports: + - port: 2181 + protocol: TCP + selector: + app: zookeeper diff --git a/script/install_dependency.sh b/script/install_dependency.sh index 7933c77..c9b5520 100755 --- a/script/install_dependency.sh +++ b/script/install_dependency.sh @@ -56,10 +56,8 @@ if [ "$LINUX_DISTRO" == "Ubuntu" ]; then try_command apt-get update try_command apt-get install -y docker-ce docker-ce-cli containerd.io try_command curl -L "https://github.com/docker/compose/releases/download/1.24.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose - try_command curl -L https://github.com/kubernetes/kompose/releases/download/v1.18.0/kompose-linux-amd64 -o /usr/local/bin/kompose - try_command chmod +x /usr/local/bin/kompose - try_command apt-get install -y python3-pip libgtk-3-dev - try_command pip3 install ruamel.yaml fabric3 wxpython + try_command apt-get install -y python3-pip + try_command pip3 install ruamel.yaml fabric3 elif [ "$LINUX_DISTRO" == "CentOS" ]; then try_command yum install -y curl cmake yum remove docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-engine @@ -67,12 +65,9 @@ elif [ "$LINUX_DISTRO" == "CentOS" ]; then try_command yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo try_command yum install -y docker-ce docker-ce-cli containerd.io try_command curl -L "https://github.com/docker/compose/releases/download/1.24.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose - try_command curl -L https://github.com/kubernetes/kompose/releases/download/v1.18.0/kompose-linux-amd64 -o /usr/bin/kompose - try_command chmod +x /usr/bin/kompose try_command yum install -y epel-release - try_command yum install -y python36 python36-pip python3-devel gtk3-devel + try_command yum 
install -y python36 python36-pip python3-devel try_command pip3 install ruamel.yaml fabric3 - try_command yum -y install wxPython else echo -e $ECHO_PREFIX_INFO "The installation will be cancelled." echo -e $ECHO_PREFIX_INFO "The CDN-Transcode-Sample does not support this OS, please use Ubuntu 18.04 or CentOS 7.6.\n" diff --git a/xcode-server/hardware/main.py b/xcode-server/hardware/main.py index 2987ae5..30fa898 100755 --- a/xcode-server/hardware/main.py +++ b/xcode-server/hardware/main.py @@ -13,8 +13,8 @@ KAFKA_GROUP = "content_provider_dash_hls_creator" ARCHIVE_ROOT = "/var/www/archive" -DASH_ROOT = "/var/www/dash" -HLS_ROOT = "/var/www/hls" +DASH_ROOT = "/var/www/video/dash" +HLS_ROOT = "/var/www/video/hls" def process_stream(stream): stream_name = stream.split("/")[1] diff --git a/xcode-server/software/main.py b/xcode-server/software/main.py index 2987ae5..30fa898 100755 --- a/xcode-server/software/main.py +++ b/xcode-server/software/main.py @@ -13,8 +13,8 @@ KAFKA_GROUP = "content_provider_dash_hls_creator" ARCHIVE_ROOT = "/var/www/archive" -DASH_ROOT = "/var/www/dash" -HLS_ROOT = "/var/www/hls" +DASH_ROOT = "/var/www/video/dash" +HLS_ROOT = "/var/www/video/hls" def process_stream(stream): stream_name = stream.split("/")[1] From 42b45bbad44c4d53f8686ff60e4c0462fe49a003 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Mon, 25 May 2020 13:27:44 -0700 Subject: [PATCH 19/91] fix helm --- deployment/kubernetes/CMakeLists.txt | 7 +- deployment/kubernetes/build.sh | 97 +--------------- deployment/kubernetes/helm/CMakeLists.txt | 2 +- deployment/kubernetes/helm/build.sh | 31 +---- .../templates/cdn-service-deployment.yaml | 21 +--- .../templates/cdn-service-service.yaml | 2 +- ...deo-dash-pvc.yaml => video-cache-pvc.yaml} | 6 +- .../templates/vod-service-deployment.yaml | 13 +-- .../helm/cdn-transcode/values.yaml.m4 | 10 +- deployment/kubernetes/helm/html-pv.yaml.m4 | 31 ----- deployment/kubernetes/helm/mkvolume.sh | 35 ------ 
deployment/kubernetes/helm/start.sh | 2 +- deployment/kubernetes/helm/stop.sh | 2 +- .../kubernetes/helm/video-archive-pv.yaml.m4 | 31 ----- .../kubernetes/helm/video-dash-pv.yaml.m4 | 31 ----- .../kubernetes/helm/video-hls-pv.yaml.m4 | 31 ----- deployment/kubernetes/mkvolume.sh | 13 ++- deployment/kubernetes/volume-info.sh | 9 ++ deployment/kubernetes/yaml/CMakeLists.txt | 10 ++ deployment/kubernetes/yaml/build.sh | 107 ++++++++++++++++++ deployment/kubernetes/yaml/cdn-deploy.yaml | 59 ++++++++++ .../kubernetes/{ => yaml}/cdn-deploy.yaml.m4 | 0 deployment/kubernetes/yaml/cdn-svc.yaml | 18 +++ .../kubernetes/{ => yaml}/cdn-svc.yaml.m4 | 0 .../kubernetes/yaml/cpu_mem_management.cfg | 24 ++++ .../{ => yaml}/cpu_mem_management.cfg.m4 | 0 deployment/kubernetes/yaml/kafka-deploy.yaml | 69 +++++++++++ .../{ => yaml}/kafka-deploy.yaml.m4 | 0 .../kafka-svc.yaml} | 0 deployment/kubernetes/yaml/kafka-svc.yaml.m4 | 12 ++ deployment/kubernetes/yaml/live-0-deploy.yaml | 56 +++++++++ .../kubernetes/{ => yaml}/live-deploy.yaml.m4 | 0 deployment/kubernetes/yaml/live-transcode.cfg | 14 +++ deployment/kubernetes/{ => yaml}/platform.m4 | 0 deployment/kubernetes/yaml/redis-deploy.yaml | 41 +++++++ .../{ => yaml}/redis-deploy.yaml.m4 | 0 .../redis-svc.yaml} | 0 deployment/kubernetes/yaml/redis-svc.yaml.m4 | 12 ++ .../kubernetes/{ => yaml}/run_with_command.py | 0 deployment/kubernetes/{ => yaml}/start.sh | 0 deployment/kubernetes/{ => yaml}/stop.sh | 0 .../kubernetes/{ => yaml}/update_yaml.py | 0 .../video-archive-pvc.yaml} | 7 +- .../{ => yaml}/video-archive-pvc.yaml.m4 | 0 .../video-cache-pvc.yaml} | 7 +- .../{ => yaml}/video-cache-pvc.yaml.m4 | 0 deployment/kubernetes/yaml/vod-deploy.yaml | 56 +++++++++ .../kubernetes/{ => yaml}/vod-deploy.yaml.m4 | 0 .../kubernetes/{ => yaml}/yaml_utils.py | 0 .../kubernetes/yaml/zookeeper-deploy.yaml | 49 ++++++++ .../{ => yaml}/zookeeper-deploy.yaml.m4 | 0 .../zookeeper-svc.yaml} | 0 .../kubernetes/yaml/zookeeper-svc.yaml.m4 | 12 ++ 53 files 
changed, 590 insertions(+), 337 deletions(-) rename deployment/kubernetes/helm/cdn-transcode/templates/{video-dash-pvc.yaml => video-cache-pvc.yaml} (53%) delete mode 100644 deployment/kubernetes/helm/html-pv.yaml.m4 delete mode 100755 deployment/kubernetes/helm/mkvolume.sh delete mode 100644 deployment/kubernetes/helm/video-archive-pv.yaml.m4 delete mode 100644 deployment/kubernetes/helm/video-dash-pv.yaml.m4 delete mode 100644 deployment/kubernetes/helm/video-hls-pv.yaml.m4 create mode 100755 deployment/kubernetes/volume-info.sh create mode 100644 deployment/kubernetes/yaml/CMakeLists.txt create mode 100755 deployment/kubernetes/yaml/build.sh create mode 100644 deployment/kubernetes/yaml/cdn-deploy.yaml rename deployment/kubernetes/{ => yaml}/cdn-deploy.yaml.m4 (100%) create mode 100644 deployment/kubernetes/yaml/cdn-svc.yaml rename deployment/kubernetes/{ => yaml}/cdn-svc.yaml.m4 (100%) create mode 100644 deployment/kubernetes/yaml/cpu_mem_management.cfg rename deployment/kubernetes/{ => yaml}/cpu_mem_management.cfg.m4 (100%) create mode 100644 deployment/kubernetes/yaml/kafka-deploy.yaml rename deployment/kubernetes/{ => yaml}/kafka-deploy.yaml.m4 (100%) rename deployment/kubernetes/{kafka-svc.yaml.m4 => yaml/kafka-svc.yaml} (100%) create mode 100644 deployment/kubernetes/yaml/kafka-svc.yaml.m4 create mode 100644 deployment/kubernetes/yaml/live-0-deploy.yaml rename deployment/kubernetes/{ => yaml}/live-deploy.yaml.m4 (100%) create mode 100644 deployment/kubernetes/yaml/live-transcode.cfg rename deployment/kubernetes/{ => yaml}/platform.m4 (100%) create mode 100644 deployment/kubernetes/yaml/redis-deploy.yaml rename deployment/kubernetes/{ => yaml}/redis-deploy.yaml.m4 (100%) rename deployment/kubernetes/{redis-svc.yaml.m4 => yaml/redis-svc.yaml} (100%) create mode 100644 deployment/kubernetes/yaml/redis-svc.yaml.m4 rename deployment/kubernetes/{ => yaml}/run_with_command.py (100%) rename deployment/kubernetes/{ => yaml}/start.sh (100%) rename 
deployment/kubernetes/{ => yaml}/stop.sh (100%) rename deployment/kubernetes/{ => yaml}/update_yaml.py (100%) rename deployment/kubernetes/{helm/cdn-transcode/templates/html-pvc.yaml => yaml/video-archive-pvc.yaml} (58%) rename deployment/kubernetes/{ => yaml}/video-archive-pvc.yaml.m4 (100%) rename deployment/kubernetes/{helm/cdn-transcode/templates/video-hls-pvc.yaml => yaml/video-cache-pvc.yaml} (54%) rename deployment/kubernetes/{ => yaml}/video-cache-pvc.yaml.m4 (100%) create mode 100644 deployment/kubernetes/yaml/vod-deploy.yaml rename deployment/kubernetes/{ => yaml}/vod-deploy.yaml.m4 (100%) rename deployment/kubernetes/{ => yaml}/yaml_utils.py (100%) create mode 100644 deployment/kubernetes/yaml/zookeeper-deploy.yaml rename deployment/kubernetes/{ => yaml}/zookeeper-deploy.yaml.m4 (100%) rename deployment/kubernetes/{zookeeper-svc.yaml.m4 => yaml/zookeeper-svc.yaml} (100%) create mode 100644 deployment/kubernetes/yaml/zookeeper-svc.yaml.m4 diff --git a/deployment/kubernetes/CMakeLists.txt b/deployment/kubernetes/CMakeLists.txt index 99defd6..248dfe8 100644 --- a/deployment/kubernetes/CMakeLists.txt +++ b/deployment/kubernetes/CMakeLists.txt @@ -1,6 +1,7 @@ -set(service "kubernetes") +set(service "pv") include("${CMAKE_SOURCE_DIR}/script/service.cmake") -include("${CMAKE_SOURCE_DIR}/script/deployment.cmake") +include("${CMAKE_SOURCE_DIR}/script/scan-all.cmake") +add_custom_target(volume ${CMAKE_CURRENT_SOURCE_DIR}/mkvolume.sh) # add cleanup files file(GLOB m4files "${CMAKE_CURRENT_SOURCE_DIR}/*.yaml.m4") @@ -8,5 +9,3 @@ foreach(m4file ${m4files}) string(REPLACE ".yaml.m4" ".yaml" yamlfile "${m4file}") set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${yamlfile}") endforeach(m4file) - -add_custom_target(volume ${CMAKE_CURRENT_SOURCE_DIR}/mkvolume.sh) diff --git a/deployment/kubernetes/build.sh b/deployment/kubernetes/build.sh index 1828507..2718c88 100755 --- a/deployment/kubernetes/build.sh +++ b/deployment/kubernetes/build.sh @@ -1,52 
+1,12 @@ #!/bin/bash -e DIR=$(dirname $(readlink -f "$0")) -NVODS="${1:-1}" -NLIVES="${2:-1}" -REGISTRY="$3" -HOSTIP=$(ip route get 8.8.8.8 | awk '/ src /{split(substr($0,index($0," src ")),f);print f[2];exit}') - -echo "Generating templates with NVODS=${NVODS}, NLIVES=${NLIVES}" - -find "${DIR}" -maxdepth 1 -name "*.yaml" -exec rm -rf "{}" \; -find "${DIR}" -maxdepth 1 -name "*.cfg" -exec rm -rf "{}" \; -rm -rf "$DIR/../../volume/video/hls" -rm -rf "$DIR/../../volume/video/dash" -mkdir -p "$DIR/../../volume/video/hls" -mkdir -p "$DIR/../../volume/video/dash" - -export CDN_CPU_REQUEST=2 -export CDN_MEM_REQUEST=2000Mi -export REDIS_CPU_REQUEST=1 -export REDIS_MEM_REQUEST=500Mi -export ZOOKEEPER_CPU_REQUEST=1 -export ZOOKEEPER_MEM_REQUEST=500Mi -export KAFKA_CPU_REQUEST=1 -export KAFKA_MEM_REQUEST=500Mi -export VOD_CPU_REQUEST=3 -export VOD_MEM_REQUEST=3000Mi -export LIVE_CPU_REQUEST=4 -export LIVE_MEM_REQUEST=3000Mi - -export STREAM_NAME=bbb_sunflower_1080p_30fps_normal.mp4 -export STREAM_WIDTH=856 -export STREAM_HEIGHT=480 -export STREAM_ENCODE_BITRATE=8M -export STREAM_ENCODE_FRAMERATE=30 -export STREAM_ENCODE_GOP=100 -export STREAM_ENCODE_MAXBFRAMES=2 -export STREAM_ENCODE_REFSNUM=2 -export STREAM_ENCODE_PRESET=veryfast -export STREAM_ENCODE_TYPE=AVC -export STREAM_ENCODE_HWACCEL=false -export STREAM_ENCODE_PROTOCOL=HLS -export STREAM_ENCODE_DENSITY=2 +rm -rf "$DIR/../../volume/video/cache" +mkdir -p "$DIR/../../volume/video/cache/hls" "$DIR/../../volume/video/cache/dash" hosts=($(kubectl get node -l xeone3-zone!=yes -o jsonpath='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}:{range @.spec.taints[*]}{@.key}={@.effect};{end}{end}' | grep Ready=True | grep -v NoSchedule | cut -f1 -d':')) -echo $hosts - if test ${#hosts[@]} -eq 0; then printf "\nFailed to locate worker node(s) for shared storage\n\n" exit -1 @@ -54,54 +14,7 @@ elif test ${#hosts[@]} -lt 2; then hosts=(${hosts[0]} ${hosts[0]}) fi -export 
VIDEO_ARCHIVE_VOLUME_PATH=/tmp/archive/video -export VIDEO_ARCHIVE_VOLUME_SIZE=2 -export VIDEO_ARCHIVE_VOLUME_HOST=${hosts[1]} - -export VIDEO_CACHE_VOLUME_PATH=/tmp/cache/video -export VIDEO_CACHE_VOLUME_SIZE=2 -export VIDEO_CACHE_VOLUME_HOST=${hosts[1]} - -for template in $(find "${DIR}" -maxdepth 1 -name "*yaml.m4" -print); do - if [[ -n $(grep LIVEIDX "$template") ]]; then - for ((LIVEIDX=0;LIVEIDX<${NLIVES};LIVEIDX++)); do - yaml=${template/-deploy.yaml.m4/-${LIVEIDX}-deploy.yaml} - m4 -DLIVEIDX=${LIVEIDX} -DREGISTRY_PREFIX=${REGISTRY} -I "${DIR}" "${template}" > "${yaml}" - done - elif [[ -n $(grep NVODS "$template") ]] && [[ ${NVODS} -eq 0 ]]; then - continue - else - yaml=${template/.m4/} - m4 -DNVODS=${NVODS} -DHOSTIP=${HOSTIP} -DREGISTRY_PREFIX=${REGISTRY} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}" "${template}" > "${yaml}" - fi -done - -for template in $(find "${DIR}" -maxdepth 1 -name "*cfg.m4" -print); do - cfg=${template/.m4/} - m4 $(env | grep _REQUEST | sed 's/^/-D/') -I "${DIR}" "${template}" > "${cfg}" -done - -for ((LIVEIDX=0;LIVEIDX<${NLIVES};LIVEIDX++)); do - cat <> ${DIR}/cpu_mem_management.cfg - -[live-${LIVEIDX}] -cpu = ${LIVE_CPU_REQUEST} -mem = ${LIVE_MEM_REQUEST} -EOF - cat <> ${DIR}/live-transcode.cfg -[live-${LIVEIDX}] -url = ${STREAM_NAME} -width = ${STREAM_WIDTH} -height = ${STREAM_HEIGHT} -bitrate = ${STREAM_ENCODE_BITRATE} -framerate = ${STREAM_ENCODE_FRAMERATE} -gop = ${STREAM_ENCODE_GOP} -maxbFrames = ${STREAM_ENCODE_MAXBFRAMES} -refsNum = ${STREAM_ENCODE_REFSNUM} -preset = ${STREAM_ENCODE_PRESET} -encodeType = ${STREAM_ENCODE_TYPE} -hwaccel = ${STREAM_ENCODE_HWACCEL} -protocol = ${STREAM_ENCODE_PROTOCOL} -density = ${STREAM_ENCODE_DENSITY} -EOF +. 
"$DIR/volume-info.sh" "${hosts[@]}" +for pv in $(find "${DIR}" -maxdepth 1 -name "*-pv.yaml.m4" -print); do + m4 $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}" "${pv}" > "${pv/.m4/}" done diff --git a/deployment/kubernetes/helm/CMakeLists.txt b/deployment/kubernetes/helm/CMakeLists.txt index a20f9ba..966a884 100644 --- a/deployment/kubernetes/helm/CMakeLists.txt +++ b/deployment/kubernetes/helm/CMakeLists.txt @@ -1,4 +1,4 @@ set(service "helm") include("${CMAKE_SOURCE_DIR}/script/service.cmake") include("${CMAKE_SOURCE_DIR}/script/deployment.cmake") -add_custom_target(volume ${CMAKE_CURRENT_SOURCE_DIR}/mkvolume.sh) +add_dependencies(build_${service} build_pv) diff --git a/deployment/kubernetes/helm/build.sh b/deployment/kubernetes/helm/build.sh index cc4a2e9..d835d43 100755 --- a/deployment/kubernetes/helm/build.sh +++ b/deployment/kubernetes/helm/build.sh @@ -10,36 +10,7 @@ if [ ! -x /usr/bin/helm ] && [ ! -x /usr/local/bin/helm ]; then exit 0 fi -echo "Generating persistent volume yaml(s)" -# list all workers -hosts=($(kubectl get node -l vcac-zone!=yes -o custom-columns=NAME:metadata.name,STATUS:status.conditions[-1].type,TAINT:spec.taints | grep " Ready " | grep -v "NoSchedule" | cut -f1 -d' ')) -if test ${#hosts[@]} -eq 0; then - printf "\nFailed to locate worker node(s) for shared storage\n\n" - exit 1 -elif test ${#hosts[@]} -lt 2; then - hosts=(${hosts[0]} ${hosts[0]}) -fi - -export HTML_VOLUME_PATH=/tmp/volume/html -export HTML_VOLUME_SIZE=1Gi -export HTML_VOLUME_HOST=${hosts[0]} - -export ARCHIVE_VOLUME_PATH=/tmp/volume/video/archive -export ARCHIVE_VOLUME_SIZE=1Gi -export ARCHIVE_VOLUME_HOST=${hosts[0]} - -export DASH_VOLUME_PATH=/tmp/volume/video/dash -export DASH_VOLUME_SIZE=1Gi -export DASH_VOLUME_HOST=${hosts[1]} - -export HLS_VOLUME_PATH=/tmp/volume/video/hls -export HLS_VOLUME_SIZE=1Gi -export HLS_VOLUME_HOST=${hosts[1]} - -for pv in "$DIR"/*-pv.yaml.m4; do - m4 $(env | grep _VOLUME_ | sed 's/^/-D/') "$pv" > "${pv/.m4/}" -done - echo 
"Generating helm chart" +. "${DIR}/../volume-info.sh" m4 -DREGISTRY_PREFIX=${REGISTRY} -DNVODS=${NVODS} -DNLIVES=${NLIVES} -DUSERID=$(id -u) -DGROUPID=$(id -g) -DHOSTIP=${HOSTIP} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}/cdn-transcode" "$DIR/cdn-transcode/values.yaml.m4" > "$DIR/cdn-transcode/values.yaml" diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml index 2ee6c43..b7f6d65 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml @@ -24,36 +24,25 @@ spec: imagePullPolicy: IfNotPresent name: cdn-service ports: - - containerPort: 8080 + - containerPort: 8443 - containerPort: 1935 volumeMounts: - mountPath: /var/run/secrets name: secrets readOnly: true - - mountPath: /var/www/html - name: html - readOnly: true - mountPath: /var/www/archive name: archive readOnly: true - - mountPath: /var/www/dash - name: dash - - mountPath: /var/www/hls - name: hls + - mountPath: /var/www/video + name: cache volumes: - name: secrets secret: secretName: self-signed-certificate - - name: html - persistentVolumeClaim: - claimName: html - name: archive persistentVolumeClaim: claimName: video-archive - - name: dash - persistentVolumeClaim: - claimName: video-dash - - name: hls + - name: cache persistentVolumeClaim: - claimName: video-hls + claimName: video-cache restartPolicy: Always diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-service.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-service.yaml index 33523f1..5b2ffaa 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-service.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-service.yaml @@ -8,7 +8,7 @@ spec: ports: - name: "443" port: 443 - targetPort: 8080 + targetPort: 8443 - name: 
"1935" port: 1935 targetPort: 1935 diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/video-dash-pvc.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/video-cache-pvc.yaml similarity index 53% rename from deployment/kubernetes/helm/cdn-transcode/templates/video-dash-pvc.yaml rename to deployment/kubernetes/helm/cdn-transcode/templates/video-cache-pvc.yaml index 101c890..2c34039 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/video-dash-pvc.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/video-cache-pvc.yaml @@ -2,12 +2,12 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: video-dash + name: video-cache spec: accessModes: - ReadWriteMany - storageClassName: video-dash + storageClassName: video-cache resources: requests: - storage: "{{ .Values.volume.video.dash.size }}" + storage: "{{ .Values.volume.video.cache.size }}" diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml index b2fd110..d0cb5f1 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml @@ -27,18 +27,13 @@ spec: - mountPath: /var/www/archive name: archive readOnly: true - - mountPath: /var/www/dash - name: dash - - mountPath: /var/www/hls - name: hls + - mountPath: /var/www/video + name: cache volumes: - name: archive persistentVolumeClaim: claimName: video-archive - - name: dash + - name: cache persistentVolumeClaim: - claimName: video-dash - - name: hls - persistentVolumeClaim: - claimName: video-hls + claimName: video-cache restartPolicy: Always diff --git a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 index aa5dea0..643f127 100644 --- a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 +++ 
b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 @@ -40,12 +40,8 @@ cdn: hostIP: defn(`HOSTIP') volume: - html: - size: defn(`HTML_VOLUME_SIZE') video: archive: - size: defn(`ARCHIVE_VOLUME_SIZE') - dash: - size: defn(`DASH_VOLUME_SIZE') - hls: - size: defn(`HLS_VOLUME_SIZE') + size: defn(`VIDEO_ARCHIVE_VOLUME_SIZE') + cache: + size: defn(`VIDEO_CACHE_VOLUME_SIZE') diff --git a/deployment/kubernetes/helm/html-pv.yaml.m4 b/deployment/kubernetes/helm/html-pv.yaml.m4 deleted file mode 100644 index 2c3c3b7..0000000 --- a/deployment/kubernetes/helm/html-pv.yaml.m4 +++ /dev/null @@ -1,31 +0,0 @@ - -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: html -provisioner: kubernetes.io/no-provisioner -volumeBindingMode: WaitForFirstConsumer - ---- - -apiVersion: v1 -kind: PersistentVolume -metadata: - name: html -spec: - capacity: - storage: defn(`HTML_VOLUME_SIZE') - accessModes: - - ReadOnlyMany - persistentVolumeReclaimPolicy: Retain - storageClassName: html - local: - path: defn(`HTML_VOLUME_PATH') - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - "defn(`HTML_VOLUME_HOST')" diff --git a/deployment/kubernetes/helm/mkvolume.sh b/deployment/kubernetes/helm/mkvolume.sh deleted file mode 100755 index f2e6c20..0000000 --- a/deployment/kubernetes/helm/mkvolume.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash -e - -DIR=$(dirname $(readlink -f "$0")) - -echo "Making volumes..." 
-HOSTS=$(kubectl get node -o 'custom-columns=NAME:.status.addresses[?(@.type=="Hostname")].address,IP:.status.addresses[?(@.type=="InternalIP")].address' | awk '!/NAME/{print $1":"$2}') -awk -v DIR="$DIR" -v HOSTS="$HOSTS" ' -BEGIN{ - split(HOSTS,tmp1," "); - for (i in tmp1) { - split(tmp1[i],tmp2,":"); - host2ip[tmp2[1]]=tmp2[2]; - } -} -/name:/ { - gsub("-","/",$2) - content="\""DIR"/../../../volume/"$2"\"" -} -/path:/ { - path=$2 -} -/- ".*"/ { - host=host2ip[substr($2,2,length($2)-2)]; - paths[host][path]=1; - contents[host][path]=content -} -END { - for (host in paths) { - for (path in paths[host]) { - system("ssh "host" \"mkdir -p "path";find "path" -mindepth 1 -maxdepth 1 -exec rm -rf {} \\\\;\""); - system("scp -r "contents[host][path]"/* "host":"path); - } - } -} -' "$DIR"/*-pv.yaml diff --git a/deployment/kubernetes/helm/start.sh b/deployment/kubernetes/helm/start.sh index 61d931b..979796d 100755 --- a/deployment/kubernetes/helm/start.sh +++ b/deployment/kubernetes/helm/start.sh @@ -10,7 +10,7 @@ function create_secret { "$DIR/../../certificate/self-sign.sh" create_secret 2>/dev/null || (kubectl delete secret self-signed-certificate; create_secret) -for yaml in $(find "$DIR" -maxdepth 1 -name "*-pv.yaml" -print); do +for yaml in $(find "$DIR/.." -maxdepth 1 -name "*-pv.yaml" -print); do kubectl apply -f "$yaml" done helm install cdn-transcode "$DIR/cdn-transcode" diff --git a/deployment/kubernetes/helm/stop.sh b/deployment/kubernetes/helm/stop.sh index 63ee139..a824f88 100755 --- a/deployment/kubernetes/helm/stop.sh +++ b/deployment/kubernetes/helm/stop.sh @@ -5,7 +5,7 @@ DIR=$(dirname $(readlink -f "$0")) helm uninstall cdn-transcode # delete pvs and scs -for yaml in $(find "${DIR}" -maxdepth 1 -name "*-pv.yaml" -print); do +for yaml in $(find "${DIR}/.." 
-maxdepth 1 -name "*-pv.yaml" -print); do kubectl delete --wait=false -f "$yaml" --ignore-not-found=true 2>/dev/null done diff --git a/deployment/kubernetes/helm/video-archive-pv.yaml.m4 b/deployment/kubernetes/helm/video-archive-pv.yaml.m4 deleted file mode 100644 index 93f4404..0000000 --- a/deployment/kubernetes/helm/video-archive-pv.yaml.m4 +++ /dev/null @@ -1,31 +0,0 @@ - -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: video-archive -provisioner: kubernetes.io/no-provisioner -volumeBindingMode: WaitForFirstConsumer - ---- - -apiVersion: v1 -kind: PersistentVolume -metadata: - name: video-archive -spec: - capacity: - storage: defn(`ARCHIVE_VOLUME_SIZE') - accessModes: - - ReadOnlyMany - persistentVolumeReclaimPolicy: Retain - storageClassName: video-archive - local: - path: defn(`ARCHIVE_VOLUME_PATH') - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - "defn(`ARCHIVE_VOLUME_HOST')" diff --git a/deployment/kubernetes/helm/video-dash-pv.yaml.m4 b/deployment/kubernetes/helm/video-dash-pv.yaml.m4 deleted file mode 100644 index e01a117..0000000 --- a/deployment/kubernetes/helm/video-dash-pv.yaml.m4 +++ /dev/null @@ -1,31 +0,0 @@ - -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: video-dash -provisioner: kubernetes.io/no-provisioner -volumeBindingMode: WaitForFirstConsumer - ---- - -apiVersion: v1 -kind: PersistentVolume -metadata: - name: video-dash -spec: - capacity: - storage: defn(`DASH_VOLUME_SIZE') - accessModes: - - ReadWriteMany - persistentVolumeReclaimPolicy: Retain - storageClassName: video-dash - local: - path: defn(`DASH_VOLUME_PATH') - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - "defn(`DASH_VOLUME_HOST')" diff --git a/deployment/kubernetes/helm/video-hls-pv.yaml.m4 b/deployment/kubernetes/helm/video-hls-pv.yaml.m4 deleted file mode 100644 index 
d95e90b..0000000 --- a/deployment/kubernetes/helm/video-hls-pv.yaml.m4 +++ /dev/null @@ -1,31 +0,0 @@ - -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: video-hls -provisioner: kubernetes.io/no-provisioner -volumeBindingMode: WaitForFirstConsumer - ---- - -apiVersion: v1 -kind: PersistentVolume -metadata: - name: video-hls -spec: - capacity: - storage: defn(`HLS_VOLUME_SIZE') - accessModes: - - ReadWriteMany - persistentVolumeReclaimPolicy: Retain - storageClassName: video-hls - local: - path: defn(`HLS_VOLUME_PATH') - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - "defn(`HLS_VOLUME_HOST')" diff --git a/deployment/kubernetes/mkvolume.sh b/deployment/kubernetes/mkvolume.sh index 77dc93e..91d2092 100755 --- a/deployment/kubernetes/mkvolume.sh +++ b/deployment/kubernetes/mkvolume.sh @@ -21,12 +21,15 @@ BEGIN{ } /- ".*"/ { host=host2ip[substr($2,2,length($2)-2)]; - system("ssh "host" \"mkdir -p "path";find "path" -mindepth 1 -maxdepth 1 -exec rm -rf {} \\\\;\""); - if (path == "/tmp/archive/video") { - system("scp -r "content"/* "host":"path); - } + paths[host][path]=1; + contents[host][path]=content } END { - system("echo finished...") + for (host in paths) { + for (path in paths[host]) { + system("ssh "host" \"mkdir -p "path";find "path" -mindepth 1 -maxdepth 1 -exec rm -rf {} \\\\;\""); + system("scp -r "contents[host][path]"/* "host":"path); + } + } } ' "$DIR"/*-pv.yaml diff --git a/deployment/kubernetes/volume-info.sh b/deployment/kubernetes/volume-info.sh new file mode 100755 index 0000000..57925b0 --- /dev/null +++ b/deployment/kubernetes/volume-info.sh @@ -0,0 +1,9 @@ +#!/bin/bash -e + +export VIDEO_ARCHIVE_VOLUME_PATH=/tmp/archive/video +export VIDEO_ARCHIVE_VOLUME_SIZE=2 +export VIDEO_ARCHIVE_VOLUME_HOST=$1 + +export VIDEO_CACHE_VOLUME_PATH=/tmp/cache/video +export VIDEO_CACHE_VOLUME_SIZE=2 +export VIDEO_CACHE_VOLUME_HOST=$2 diff --git 
a/deployment/kubernetes/yaml/CMakeLists.txt b/deployment/kubernetes/yaml/CMakeLists.txt new file mode 100644 index 0000000..1ff63f9 --- /dev/null +++ b/deployment/kubernetes/yaml/CMakeLists.txt @@ -0,0 +1,10 @@ +set(service "kubernetes") +include("${CMAKE_SOURCE_DIR}/script/service.cmake") +include("${CMAKE_SOURCE_DIR}/script/deployment.cmake") + +# add cleanup files +file(GLOB m4files "${CMAKE_CURRENT_SOURCE_DIR}/*.yaml.m4") +foreach(m4file ${m4files}) + string(REPLACE ".yaml.m4" ".yaml" yamlfile "${m4file}") + set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${yamlfile}") +endforeach(m4file) diff --git a/deployment/kubernetes/yaml/build.sh b/deployment/kubernetes/yaml/build.sh new file mode 100755 index 0000000..1828507 --- /dev/null +++ b/deployment/kubernetes/yaml/build.sh @@ -0,0 +1,107 @@ +#!/bin/bash -e + +DIR=$(dirname $(readlink -f "$0")) +NVODS="${1:-1}" +NLIVES="${2:-1}" +REGISTRY="$3" + +HOSTIP=$(ip route get 8.8.8.8 | awk '/ src /{split(substr($0,index($0," src ")),f);print f[2];exit}') + +echo "Generating templates with NVODS=${NVODS}, NLIVES=${NLIVES}" + +find "${DIR}" -maxdepth 1 -name "*.yaml" -exec rm -rf "{}" \; +find "${DIR}" -maxdepth 1 -name "*.cfg" -exec rm -rf "{}" \; +rm -rf "$DIR/../../volume/video/hls" +rm -rf "$DIR/../../volume/video/dash" +mkdir -p "$DIR/../../volume/video/hls" +mkdir -p "$DIR/../../volume/video/dash" + +export CDN_CPU_REQUEST=2 +export CDN_MEM_REQUEST=2000Mi +export REDIS_CPU_REQUEST=1 +export REDIS_MEM_REQUEST=500Mi +export ZOOKEEPER_CPU_REQUEST=1 +export ZOOKEEPER_MEM_REQUEST=500Mi +export KAFKA_CPU_REQUEST=1 +export KAFKA_MEM_REQUEST=500Mi +export VOD_CPU_REQUEST=3 +export VOD_MEM_REQUEST=3000Mi +export LIVE_CPU_REQUEST=4 +export LIVE_MEM_REQUEST=3000Mi + +export STREAM_NAME=bbb_sunflower_1080p_30fps_normal.mp4 +export STREAM_WIDTH=856 +export STREAM_HEIGHT=480 +export STREAM_ENCODE_BITRATE=8M +export STREAM_ENCODE_FRAMERATE=30 +export STREAM_ENCODE_GOP=100 +export STREAM_ENCODE_MAXBFRAMES=2 
+export STREAM_ENCODE_REFSNUM=2 +export STREAM_ENCODE_PRESET=veryfast +export STREAM_ENCODE_TYPE=AVC +export STREAM_ENCODE_HWACCEL=false +export STREAM_ENCODE_PROTOCOL=HLS +export STREAM_ENCODE_DENSITY=2 + +hosts=($(kubectl get node -l xeone3-zone!=yes -o jsonpath='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}:{range @.spec.taints[*]}{@.key}={@.effect};{end}{end}' | grep Ready=True | grep -v NoSchedule | cut -f1 -d':')) + +echo $hosts + +if test ${#hosts[@]} -eq 0; then + printf "\nFailed to locate worker node(s) for shared storage\n\n" + exit -1 +elif test ${#hosts[@]} -lt 2; then + hosts=(${hosts[0]} ${hosts[0]}) +fi + +export VIDEO_ARCHIVE_VOLUME_PATH=/tmp/archive/video +export VIDEO_ARCHIVE_VOLUME_SIZE=2 +export VIDEO_ARCHIVE_VOLUME_HOST=${hosts[1]} + +export VIDEO_CACHE_VOLUME_PATH=/tmp/cache/video +export VIDEO_CACHE_VOLUME_SIZE=2 +export VIDEO_CACHE_VOLUME_HOST=${hosts[1]} + +for template in $(find "${DIR}" -maxdepth 1 -name "*yaml.m4" -print); do + if [[ -n $(grep LIVEIDX "$template") ]]; then + for ((LIVEIDX=0;LIVEIDX<${NLIVES};LIVEIDX++)); do + yaml=${template/-deploy.yaml.m4/-${LIVEIDX}-deploy.yaml} + m4 -DLIVEIDX=${LIVEIDX} -DREGISTRY_PREFIX=${REGISTRY} -I "${DIR}" "${template}" > "${yaml}" + done + elif [[ -n $(grep NVODS "$template") ]] && [[ ${NVODS} -eq 0 ]]; then + continue + else + yaml=${template/.m4/} + m4 -DNVODS=${NVODS} -DHOSTIP=${HOSTIP} -DREGISTRY_PREFIX=${REGISTRY} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}" "${template}" > "${yaml}" + fi +done + +for template in $(find "${DIR}" -maxdepth 1 -name "*cfg.m4" -print); do + cfg=${template/.m4/} + m4 $(env | grep _REQUEST | sed 's/^/-D/') -I "${DIR}" "${template}" > "${cfg}" +done + +for ((LIVEIDX=0;LIVEIDX<${NLIVES};LIVEIDX++)); do + cat <> ${DIR}/cpu_mem_management.cfg + +[live-${LIVEIDX}] +cpu = ${LIVE_CPU_REQUEST} +mem = ${LIVE_MEM_REQUEST} +EOF + cat <> ${DIR}/live-transcode.cfg +[live-${LIVEIDX}] +url = ${STREAM_NAME} +width = 
${STREAM_WIDTH} +height = ${STREAM_HEIGHT} +bitrate = ${STREAM_ENCODE_BITRATE} +framerate = ${STREAM_ENCODE_FRAMERATE} +gop = ${STREAM_ENCODE_GOP} +maxbFrames = ${STREAM_ENCODE_MAXBFRAMES} +refsNum = ${STREAM_ENCODE_REFSNUM} +preset = ${STREAM_ENCODE_PRESET} +encodeType = ${STREAM_ENCODE_TYPE} +hwaccel = ${STREAM_ENCODE_HWACCEL} +protocol = ${STREAM_ENCODE_PROTOCOL} +density = ${STREAM_ENCODE_DENSITY} +EOF +done diff --git a/deployment/kubernetes/yaml/cdn-deploy.yaml b/deployment/kubernetes/yaml/cdn-deploy.yaml new file mode 100644 index 0000000..f28a079 --- /dev/null +++ b/deployment/kubernetes/yaml/cdn-deploy.yaml @@ -0,0 +1,59 @@ + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cdn + labels: + app: cdn +spec: + replicas: 1 + selector: + matchLabels: + app: cdn + template: + metadata: + labels: + app: cdn + spec: + enableServiceLinks: false + containers: + - name: cdn + image: jfsxw001.jf.intel.com:10443/ovc_cdn_service:latest + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8443 + - containerPort: 1935 + resources: + limits: + cpu: 4 + memory: 4000Mi + requests: + cpu: 2 + memory: 2000Mi + volumeMounts: + - mountPath: /var/www/archive + name: video-archive + - mountPath: /var/www/video + name: video-cache + - mountPath: /var/run/secrets + name: self-signed-certificate + readOnly: true + volumes: + - name: video-archive + persistentVolumeClaim: + claimName: video-archive + - name: video-cache + persistentVolumeClaim: + claimName: video-cache + - name: self-signed-certificate + secret: + secretName: self-signed-certificate + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "xeone3-zone" + operator: NotIn + values: + - "xeone3" diff --git a/deployment/kubernetes/cdn-deploy.yaml.m4 b/deployment/kubernetes/yaml/cdn-deploy.yaml.m4 similarity index 100% rename from deployment/kubernetes/cdn-deploy.yaml.m4 rename to deployment/kubernetes/yaml/cdn-deploy.yaml.m4 diff 
--git a/deployment/kubernetes/yaml/cdn-svc.yaml b/deployment/kubernetes/yaml/cdn-svc.yaml new file mode 100644 index 0000000..38794e2 --- /dev/null +++ b/deployment/kubernetes/yaml/cdn-svc.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: cdn-service + labels: + app: cdn +spec: + ports: + - port: 443 + targetPort: 8443 + name: https + - port: 1935 + protocol: TCP + name: tcp + externalIPs: + - 134.134.172.74 + selector: + app: cdn diff --git a/deployment/kubernetes/cdn-svc.yaml.m4 b/deployment/kubernetes/yaml/cdn-svc.yaml.m4 similarity index 100% rename from deployment/kubernetes/cdn-svc.yaml.m4 rename to deployment/kubernetes/yaml/cdn-svc.yaml.m4 diff --git a/deployment/kubernetes/yaml/cpu_mem_management.cfg b/deployment/kubernetes/yaml/cpu_mem_management.cfg new file mode 100644 index 0000000..9406fb7 --- /dev/null +++ b/deployment/kubernetes/yaml/cpu_mem_management.cfg @@ -0,0 +1,24 @@ +# Minimum resource requirements on container runtime, the maximum resource requirements is 2x minimum. 
+[cdn] +cpu = 2 +mem = 2000Mi + +[redis] +cpu = 1 +mem = 500Mi + +[zookeeper] +cpu = 1 +mem = 500Mi + +[kafka] +cpu = 1 +mem = 500Mi + +[vod] +cpu = 3 +mem = 3000Mi + +[live-0] +cpu = 4 +mem = 3000Mi diff --git a/deployment/kubernetes/cpu_mem_management.cfg.m4 b/deployment/kubernetes/yaml/cpu_mem_management.cfg.m4 similarity index 100% rename from deployment/kubernetes/cpu_mem_management.cfg.m4 rename to deployment/kubernetes/yaml/cpu_mem_management.cfg.m4 diff --git a/deployment/kubernetes/yaml/kafka-deploy.yaml b/deployment/kubernetes/yaml/kafka-deploy.yaml new file mode 100644 index 0000000..649b460 --- /dev/null +++ b/deployment/kubernetes/yaml/kafka-deploy.yaml @@ -0,0 +1,69 @@ + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka + labels: + app: kafka +spec: + replicas: 1 + selector: + matchLabels: + app: kafka + template: + metadata: + labels: + app: kafka + spec: + enableServiceLinks: false + containers: + - name: kafka + image: wurstmeister/kafka:2.12-2.4.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9092 + env: + - name: "KAFKA_BROKER_ID" + value: "1" + - name: "KAFKA_ZOOKEEPER_CONNECT" + value: "zookeeper-service:2181" + - name: "KAFKA_LISTENERS" + value: "PLAINTEXT://:9092" + - name: "KAFKA_ADVERTISED_LISTENERS" + value: "PLAINTEXT://kafka-service:9092" + - name: "KAFKA_LISTENER_SECURITY_PROTOCOL_MAP" + value: "PLAINTEXT:PLAINTEXT" + - name: "KAFKA_INTER_BROKER_LISTENER_NAME" + value: "PLAINTEXT" + - name: "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR" + value: "1" + - name: "KAFKA_DEFAULT_REPLICATION_FACTOR" + value: "1" + - name: "KAFKA_AUTO_CREATE_TOPICS_ENABLE" + value: "true" + - name: KAFKA_CREATE_TOPICS + value: content_provider_sched:16:1 + - name: "KAFKA_NUM_PARTITIONS" + value: "16" + - name: "KAFKA_LOG_RETENTION_MINUTES" + value: "30" + - name: "KAFKA_HEAP_OPTS" + value: "-Xmx1024m -Xms1024m" + - name: "KAFKA_LOG4J_ROOT_LOGLEVEL" + value: "ERROR" + resources: + requests: + cpu: 1 + memory: 500Mi + limits: + cpu: 2 + 
memory: 1000Mi + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "xeone3-zone" + operator: NotIn + values: + - "xeone3" diff --git a/deployment/kubernetes/kafka-deploy.yaml.m4 b/deployment/kubernetes/yaml/kafka-deploy.yaml.m4 similarity index 100% rename from deployment/kubernetes/kafka-deploy.yaml.m4 rename to deployment/kubernetes/yaml/kafka-deploy.yaml.m4 diff --git a/deployment/kubernetes/kafka-svc.yaml.m4 b/deployment/kubernetes/yaml/kafka-svc.yaml similarity index 100% rename from deployment/kubernetes/kafka-svc.yaml.m4 rename to deployment/kubernetes/yaml/kafka-svc.yaml diff --git a/deployment/kubernetes/yaml/kafka-svc.yaml.m4 b/deployment/kubernetes/yaml/kafka-svc.yaml.m4 new file mode 100644 index 0000000..6adfca0 --- /dev/null +++ b/deployment/kubernetes/yaml/kafka-svc.yaml.m4 @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: kafka-service + labels: + app: kafka +spec: + ports: + - port: 9092 + protocol: TCP + selector: + app: kafka diff --git a/deployment/kubernetes/yaml/live-0-deploy.yaml b/deployment/kubernetes/yaml/live-0-deploy.yaml new file mode 100644 index 0000000..9a29d80 --- /dev/null +++ b/deployment/kubernetes/yaml/live-0-deploy.yaml @@ -0,0 +1,56 @@ + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: live-0 + labels: + app: live-0 +spec: + replicas: 1 + selector: + matchLabels: + app: live-0 + template: + metadata: + labels: + app: live-0 + spec: + enableServiceLinks: false + containers: + - name: live-0 + image: jfsxw001.jf.intel.com:10443/ovc_software_transcode_service:latest + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 8 + memory: 6000Mi + requests: + cpu: 4 + memory: 3000Mi + env: + - name: NO_PROXY + value: "*" + - name: no_proxy + value: "*" + volumeMounts: + - mountPath: /var/www/video + name: video-cache + - mountPath: /var/www/archive + name: video-archive + readOnly: true + volumes: + - name: video-cache + 
persistentVolumeClaim: + claimName: video-cache + - name: video-archive + persistentVolumeClaim: + claimName: video-archive + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "xeone3-zone" + operator: NotIn + values: + - "xeone3" diff --git a/deployment/kubernetes/live-deploy.yaml.m4 b/deployment/kubernetes/yaml/live-deploy.yaml.m4 similarity index 100% rename from deployment/kubernetes/live-deploy.yaml.m4 rename to deployment/kubernetes/yaml/live-deploy.yaml.m4 diff --git a/deployment/kubernetes/yaml/live-transcode.cfg b/deployment/kubernetes/yaml/live-transcode.cfg new file mode 100644 index 0000000..4c64851 --- /dev/null +++ b/deployment/kubernetes/yaml/live-transcode.cfg @@ -0,0 +1,14 @@ +[live-0] +url = bbb_sunflower_1080p_30fps_normal.mp4 +width = 856 +height = 480 +bitrate = 8M +framerate = 30 +gop = 100 +maxbFrames = 2 +refsNum = 2 +preset = veryfast +encodeType = AVC +hwaccel = false +protocol = HLS +density = 2 diff --git a/deployment/kubernetes/platform.m4 b/deployment/kubernetes/yaml/platform.m4 similarity index 100% rename from deployment/kubernetes/platform.m4 rename to deployment/kubernetes/yaml/platform.m4 diff --git a/deployment/kubernetes/yaml/redis-deploy.yaml b/deployment/kubernetes/yaml/redis-deploy.yaml new file mode 100644 index 0000000..f6f208c --- /dev/null +++ b/deployment/kubernetes/yaml/redis-deploy.yaml @@ -0,0 +1,41 @@ + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis + labels: + app: redis +spec: + replicas: 1 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + spec: + containers: + - name: redis + image: redis:latest + imagePullPolicy: IfNotPresent + command: + - redis-server + ports: + - containerPort: 6379 + resources: + requests: + cpu: 1 + memory: 500Mi + limits: + cpu: 2 + memory: 1000Mi + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - 
matchExpressions: + - key: "xeone3-zone" + operator: NotIn + values: + - "xeone3" diff --git a/deployment/kubernetes/redis-deploy.yaml.m4 b/deployment/kubernetes/yaml/redis-deploy.yaml.m4 similarity index 100% rename from deployment/kubernetes/redis-deploy.yaml.m4 rename to deployment/kubernetes/yaml/redis-deploy.yaml.m4 diff --git a/deployment/kubernetes/redis-svc.yaml.m4 b/deployment/kubernetes/yaml/redis-svc.yaml similarity index 100% rename from deployment/kubernetes/redis-svc.yaml.m4 rename to deployment/kubernetes/yaml/redis-svc.yaml diff --git a/deployment/kubernetes/yaml/redis-svc.yaml.m4 b/deployment/kubernetes/yaml/redis-svc.yaml.m4 new file mode 100644 index 0000000..2677ba9 --- /dev/null +++ b/deployment/kubernetes/yaml/redis-svc.yaml.m4 @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: redis-service + labels: + app: redis +spec: + ports: + - port: 6379 + protocol: TCP + selector: + app: redis diff --git a/deployment/kubernetes/run_with_command.py b/deployment/kubernetes/yaml/run_with_command.py similarity index 100% rename from deployment/kubernetes/run_with_command.py rename to deployment/kubernetes/yaml/run_with_command.py diff --git a/deployment/kubernetes/start.sh b/deployment/kubernetes/yaml/start.sh similarity index 100% rename from deployment/kubernetes/start.sh rename to deployment/kubernetes/yaml/start.sh diff --git a/deployment/kubernetes/stop.sh b/deployment/kubernetes/yaml/stop.sh similarity index 100% rename from deployment/kubernetes/stop.sh rename to deployment/kubernetes/yaml/stop.sh diff --git a/deployment/kubernetes/update_yaml.py b/deployment/kubernetes/yaml/update_yaml.py similarity index 100% rename from deployment/kubernetes/update_yaml.py rename to deployment/kubernetes/yaml/update_yaml.py diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/html-pvc.yaml b/deployment/kubernetes/yaml/video-archive-pvc.yaml similarity index 58% rename from deployment/kubernetes/helm/cdn-transcode/templates/html-pvc.yaml 
rename to deployment/kubernetes/yaml/video-archive-pvc.yaml index 4555f4c..5a561a5 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/html-pvc.yaml +++ b/deployment/kubernetes/yaml/video-archive-pvc.yaml @@ -2,12 +2,11 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: html + name: video-archive spec: accessModes: - ReadOnlyMany - storageClassName: html + storageClassName: video-archive resources: requests: - storage: "{{ .Values.volume.html.size }}" - + storage: 2Gi diff --git a/deployment/kubernetes/video-archive-pvc.yaml.m4 b/deployment/kubernetes/yaml/video-archive-pvc.yaml.m4 similarity index 100% rename from deployment/kubernetes/video-archive-pvc.yaml.m4 rename to deployment/kubernetes/yaml/video-archive-pvc.yaml.m4 diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/video-hls-pvc.yaml b/deployment/kubernetes/yaml/video-cache-pvc.yaml similarity index 54% rename from deployment/kubernetes/helm/cdn-transcode/templates/video-hls-pvc.yaml rename to deployment/kubernetes/yaml/video-cache-pvc.yaml index 764066e..31fbd79 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/video-hls-pvc.yaml +++ b/deployment/kubernetes/yaml/video-cache-pvc.yaml @@ -2,12 +2,11 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: video-hls + name: video-cache spec: accessModes: - ReadWriteMany - storageClassName: video-hls + storageClassName: video-cache resources: requests: - storage: "{{ .Values.volume.video.hls.size }}" - + storage: 2Gi diff --git a/deployment/kubernetes/video-cache-pvc.yaml.m4 b/deployment/kubernetes/yaml/video-cache-pvc.yaml.m4 similarity index 100% rename from deployment/kubernetes/video-cache-pvc.yaml.m4 rename to deployment/kubernetes/yaml/video-cache-pvc.yaml.m4 diff --git a/deployment/kubernetes/yaml/vod-deploy.yaml b/deployment/kubernetes/yaml/vod-deploy.yaml new file mode 100644 index 0000000..8ca209c --- /dev/null +++ b/deployment/kubernetes/yaml/vod-deploy.yaml @@ -0,0 +1,56 @@ + +apiVersion: 
apps/v1 +kind: Deployment +metadata: + name: vod + labels: + app: vod +spec: + replicas: 2 + selector: + matchLabels: + app: vod + template: + metadata: + labels: + app: vod + spec: + enableServiceLinks: false + containers: + - name: vod + image: jfsxw001.jf.intel.com:10443/ovc_software_transcode_service:latest + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 6 + memory: 6000Mi + requests: + cpu: 3 + memory: 3000Mi + env: + - name: NO_PROXY + value: "*" + - name: no_proxy + value: "*" + volumeMounts: + - mountPath: /var/www/video + name: video-cache + - mountPath: /var/www/archive + name: video-archive + readOnly: true + volumes: + - name: video-cache + persistentVolumeClaim: + claimName: video-cache + - name: video-archive + persistentVolumeClaim: + claimName: video-archive + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "xeone3-zone" + operator: NotIn + values: + - "xeone3" diff --git a/deployment/kubernetes/vod-deploy.yaml.m4 b/deployment/kubernetes/yaml/vod-deploy.yaml.m4 similarity index 100% rename from deployment/kubernetes/vod-deploy.yaml.m4 rename to deployment/kubernetes/yaml/vod-deploy.yaml.m4 diff --git a/deployment/kubernetes/yaml_utils.py b/deployment/kubernetes/yaml/yaml_utils.py similarity index 100% rename from deployment/kubernetes/yaml_utils.py rename to deployment/kubernetes/yaml/yaml_utils.py diff --git a/deployment/kubernetes/yaml/zookeeper-deploy.yaml b/deployment/kubernetes/yaml/zookeeper-deploy.yaml new file mode 100644 index 0000000..aa8bdb6 --- /dev/null +++ b/deployment/kubernetes/yaml/zookeeper-deploy.yaml @@ -0,0 +1,49 @@ + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper + labels: + app: zookeeper +spec: + replicas: 1 + selector: + matchLabels: + app: zookeeper + template: + metadata: + labels: + app: zookeeper + spec: + enableServiceLinks: false + containers: + - name: zookeeper + image: zookeeper:3.5.6 + imagePullPolicy: 
IfNotPresent + ports: + - containerPort: 2181 + env: + - name: "ZOO_TICK_TIME" + value: "10000" + - name: "ZOO_MAX_CLIENT_CNXNS" + value: "160000" + - name: "ZOO_AUTOPURGE_PURGEINTERVAL" + value: "1" + - name: "ZOO_LOG4J_PROP" + value: "ERROR" + resources: + requests: + cpu: 1 + memory: 500Mi + limits: + cpu: 2 + memory: 1000Mi + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "xeone3-zone" + operator: NotIn + values: + - "xeone3" diff --git a/deployment/kubernetes/zookeeper-deploy.yaml.m4 b/deployment/kubernetes/yaml/zookeeper-deploy.yaml.m4 similarity index 100% rename from deployment/kubernetes/zookeeper-deploy.yaml.m4 rename to deployment/kubernetes/yaml/zookeeper-deploy.yaml.m4 diff --git a/deployment/kubernetes/zookeeper-svc.yaml.m4 b/deployment/kubernetes/yaml/zookeeper-svc.yaml similarity index 100% rename from deployment/kubernetes/zookeeper-svc.yaml.m4 rename to deployment/kubernetes/yaml/zookeeper-svc.yaml diff --git a/deployment/kubernetes/yaml/zookeeper-svc.yaml.m4 b/deployment/kubernetes/yaml/zookeeper-svc.yaml.m4 new file mode 100644 index 0000000..b89c392 --- /dev/null +++ b/deployment/kubernetes/yaml/zookeeper-svc.yaml.m4 @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: zookeeper-service + labels: + app: zookeeper +spec: + ports: + - port: 2181 + protocol: TCP + selector: + app: zookeeper From 2e8f9662c5fd80549c59ac574139b38b2309aa0b Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Mon, 25 May 2020 14:29:11 -0700 Subject: [PATCH 20/91] fix docker-swarm --- deployment/docker-swarm/CMakeLists.txt | 3 +- deployment/docker-swarm/build.sh | 3 ++ deployment/docker-swarm/docker-compose.yml.m4 | 20 ++++------ deployment/docker-swarm/start.sh | 40 +++---------------- deployment/docker-swarm/stop.sh | 21 +--------- 5 files changed, 18 insertions(+), 69 deletions(-) diff --git a/deployment/docker-swarm/CMakeLists.txt 
b/deployment/docker-swarm/CMakeLists.txt index 74f8b99..568397a 100644 --- a/deployment/docker-swarm/CMakeLists.txt +++ b/deployment/docker-swarm/CMakeLists.txt @@ -1,4 +1,3 @@ set(service "docker_swarm") -include("${CMAKE_SOURCE_DIR}/script/deployment.cmake") -set(service "docker_compose") +include("${CMAKE_SOURCE_DIR}/script/service.cmake") include("${CMAKE_SOURCE_DIR}/script/deployment.cmake") diff --git a/deployment/docker-swarm/build.sh b/deployment/docker-swarm/build.sh index ce18ab4..402be67 100755 --- a/deployment/docker-swarm/build.sh +++ b/deployment/docker-swarm/build.sh @@ -4,4 +4,7 @@ DIR=$(dirname $(readlink -f "$0")) NVODS="${1:-1}" REGISTRY="$3" +rm -rf "$DIR/../../volume/video/cache" +mkdir -p "$DIR/../../volume/video/cache/hls" "$DIR/../../volume/video/cache/dash" + m4 -DNVODS=${NVODS} -DREGISTRY_PREFIX=${REGISTRY} -I "${DIR}" "${DIR}/docker-compose.yml.m4" > "${DIR}/docker-compose.yml" diff --git a/deployment/docker-swarm/docker-compose.yml.m4 b/deployment/docker-swarm/docker-compose.yml.m4 index a45a748..338e4a1 100644 --- a/deployment/docker-swarm/docker-compose.yml.m4 +++ b/deployment/docker-swarm/docker-compose.yml.m4 @@ -4,8 +4,6 @@ services: redis-service: image: redis:latest - ports: - - "6379:6379" restart: always deploy: replicas: 1 @@ -54,13 +52,10 @@ services: cdn-service: image: defn(`REGISTRY_PREFIX')ovc_cdn_service:latest ports: - - "443:8080" + - "443:8443" volumes: - - ${HTML_VOLUME}:/var/www/html:ro - ${VIDEO_ARCHIVE_VOLUME}:/var/www/archive:rw - - ${VIDEO_DASH_VOLUME}:/var/www/dash:rw - - ${VIDEO_HLS_VOLUME}:/var/www/hls:rw - - ${NGINX_LOG_VOLUME}:/var/www/log:rw + - ${VIDEO_CACHE_VOLUME}:/var/www/video:rw depends_on: - kafka-service deploy: @@ -81,8 +76,7 @@ services: image: defn(`REGISTRY_PREFIX')ovc_software_transcode_service:latest volumes: - ${VIDEO_ARCHIVE_VOLUME}:/var/www/archive:ro - - ${VIDEO_DASH_VOLUME}:/var/www/dash:rw - - ${VIDEO_HLS_VOLUME}:/var/www/hls:rw + - ${VIDEO_CACHE_VOLUME}:/var/www/video:rw deploy: 
replicas: defn(`NVODS') depends_on: @@ -90,13 +84,15 @@ services: - zookeeper-service live-transcode-service: - image: defn(`REGISTRY_PREFIX')vc_software_transcode_service:latest + image: defn(`REGISTRY_PREFIX')ovc_software_transcode_service:latest volumes: - ${VIDEO_ARCHIVE_VOLUME}:/var/www/archive:ro depends_on: - cdn-service - command: | - bash -c 'ffmpeg -re -stream_loop -1 -i /var/www/archive/bbb_sunflower_1080p_30fps_normal.mp4 -vf scale=2560:1440 -c:v libsvt_hevc -b:v 15M -forced-idr 1 -f flv rtmp://cdn-service/hls/big_buck_bunny_2560x1440 -vf scale=1920:1080 -c:v libsvt_hevc -b:v 10M -forced-idr 1 -f flv rtmp://cdn-service/hls/big_buck_bunny_1920x1080 -vf scale=1280:720 -c:v libx264 -b:v 8M -f flv rtmp://cdn-service/hls/big_buck_bunny_1280x720 -vf scale=854:480 -c:v libx264 -b:v 6M -f flv rtmp://cdn-service/hls/big_buck_bunny_854x480 -abr_pipeline' + environment: + no_proxy: "cdn-service" + NO_PROXY: "cdn-service" + command: ["ffmpeg","-re","-stream_loop","-1","-i","/var/www/archive/bbb_sunflower_1080p_30fps_normal.mp4","-vf","scale=856:480","-c:v","libx264","-b:v","8000000","-forced-idr","1","-preset","veryfast","-an","-f","flv","rtmp://cdn-service/dash/media_0_0","-vf","scale=856:480","-c:v","libsvt_hevc","-b:v","8000000","-forced-idr","1","-preset","9","-an","-f","flv","rtmp://cdn-service/hls/media_0_0","-abr_pipeline"] secrets: self_key: diff --git a/deployment/docker-swarm/start.sh b/deployment/docker-swarm/start.sh index a0a7335..2b9c929 100755 --- a/deployment/docker-swarm/start.sh +++ b/deployment/docker-swarm/start.sh @@ -2,40 +2,10 @@ DIR=$(dirname $(readlink -f "$0")) export VIDEO_ARCHIVE_VOLUME=$(readlink -f "$DIR/../../volume/video/archive") -export VIDEO_DASH_VOLUME=$(readlink -f "$DIR/../../volume/video/dash") -export VIDEO_HLS_VOLUME=$(readlink -f "$DIR/../../volume/video/hls") -export NGINX_LOG_VOLUME=$(readlink -f "/var/log/nginx") -export HTML_VOLUME=$(readlink -f "$DIR/../../volume/html") +export VIDEO_CACHE_VOLUME=$(readlink -f 
"$DIR/../../volume/video/cache") export SECRETS_VOLUME=$(readlink -f "$DIR/../certificate") -docker container prune -f -docker volume prune -f -docker network prune -f -sudo rm -rf "${VIDEO_DASH_VOLUME}" "${VIDEO_HLS_VOLUME}" -sudo mkdir -p "${VIDEO_DASH_VOLUME}" "${VIDEO_HLS_VOLUME}" "${NGINX_LOG_VOLUME}" - -yml="$DIR/docker-compose.$(hostname).yml" -test -f "$yml" || yml="$DIR/docker-compose.yml" - -case "$1" in -docker_compose) - dcv="$(docker-compose --version | cut -f3 -d' ' | cut -f1 -d',')" - mdcv="$(printf '%s\n' $dcv 1.20 | sort -r -V | head -n 1)" - if test "$mdcv" = "1.20"; then - echo "" - echo "docker-compose >=1.20 is required." - echo "Please upgrade docker-compose at https://docs.docker.com/compose/install." - echo "" - exit 0 - fi - export USER_ID=$(id -u) - export GROUP_ID=$(id -g) - docker-compose -f "$yml" -p ovc --compatibility up - ;; -*) - export USER_ID=$(id -u) - export GROUP_ID=$(id -g) - "$DIR/../certificate/self-sign.sh" - docker stack deploy -c "$yml" ovc - ;; -esac +export USER_ID=$(id -u) +export GROUP_ID=$(id -g) +"$DIR/../certificate/self-sign.sh" +docker stack deploy -c "$DIR/docker-compose.yml" cdnt diff --git a/deployment/docker-swarm/stop.sh b/deployment/docker-swarm/stop.sh index 829d621..c7ef1d3 100755 --- a/deployment/docker-swarm/stop.sh +++ b/deployment/docker-swarm/stop.sh @@ -4,24 +4,5 @@ DIR=$(dirname $(readlink -f "$0")) yml="$DIR/docker-compose.$(hostname).yml" test -f "$yml" || yml="$DIR/docker-compose.yml" -case "$1" in -docker_compose) - dcv="$(docker-compose --version | cut -f3 -d' ' | cut -f1 -d',')" - mdcv="$(printf '%s\n' $dcv 1.10 | sort -r -V | head -n 1)" - if test "$mdcv" = "1.10"; then - echo "" - echo "docker-compose >=1.10 is required." - echo "Please upgrade docker-compose at https://docs.docker.com/compose/install." 
- echo "" - exit 0 - fi - docker-compose -f "$yml" -p ovc --compatibility down - ;; -*) - docker stack rm ovc - ;; -esac -docker container prune -f -docker volume prune -f -docker network prune -f +docker stack rm cdnt From e97f5deccb932d0821146a2d13c6097d8174ff5e Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Mon, 25 May 2020 19:50:04 -0700 Subject: [PATCH 21/91] fix kuberebetes --- .gitignore | 3 +- cdn-server/Dockerfile | 17 ++- cdn-server/nginx.conf | 1 + content-provider/Dockerfile | 12 ++- content-provider/build.sh | 3 - .../templates/cdn-service-deployment.yaml | 1 - .../templates/live-service-deployment.yaml | 9 ++ .../templates/video-archive-pvc.yaml | 2 +- .../kubernetes/video-archive-pv.yaml.m4 | 2 +- deployment/kubernetes/yaml/build.sh | 101 +----------------- deployment/kubernetes/yaml/cdn-deploy.yaml | 59 ---------- deployment/kubernetes/yaml/cdn-deploy.yaml.m4 | 10 +- deployment/kubernetes/yaml/cdn-svc.yaml | 18 ---- deployment/kubernetes/yaml/cdn-svc.yaml.m4 | 4 +- deployment/kubernetes/yaml/configure.m4 | 44 ++++++++ .../kubernetes/yaml/cpu_mem_management.cfg.m4 | 20 ---- deployment/kubernetes/yaml/kafka-deploy.yaml | 69 ------------ .../kubernetes/yaml/kafka-deploy.yaml.m4 | 12 ++- deployment/kubernetes/yaml/kafka-svc.yaml | 12 --- deployment/kubernetes/yaml/live-0-deploy.yaml | 56 ---------- .../kubernetes/yaml/live-deploy.yaml.m4 | 48 ++++++--- deployment/kubernetes/yaml/redis-deploy.yaml | 41 ------- .../kubernetes/yaml/redis-deploy.yaml.m4 | 10 +- deployment/kubernetes/yaml/redis-svc.yaml | 12 --- deployment/kubernetes/yaml/redis-svc.yaml.m4 | 1 + .../kubernetes/yaml/run_with_command.py | 76 ------------- deployment/kubernetes/yaml/start.sh | 91 ++-------------- deployment/kubernetes/yaml/stop.sh | 51 +-------- deployment/kubernetes/yaml/update_yaml.py | 72 ------------- .../kubernetes/yaml/video-archive-pvc.yaml | 12 --- .../kubernetes/yaml/video-archive-pvc.yaml.m4 | 2 +- 
.../kubernetes/yaml/video-cache-pvc.yaml | 12 --- deployment/kubernetes/yaml/vod-deploy.yaml | 56 ---------- deployment/kubernetes/yaml/vod-deploy.yaml.m4 | 10 +- deployment/kubernetes/yaml/yaml_utils.py | 46 -------- .../kubernetes/yaml/zookeeper-deploy.yaml | 49 --------- .../kubernetes/yaml/zookeeper-deploy.yaml.m4 | 10 +- deployment/kubernetes/yaml/zookeeper-svc.yaml | 12 --- script/loop.m4 | 3 + script/nfs_setup.sh | 6 +- streaming-server/Dockerfile | 19 ++-- xcode-server/hardware/Dockerfile | 15 ++- xcode-server/software/Dockerfile | 17 ++- 43 files changed, 205 insertions(+), 921 deletions(-) delete mode 100644 deployment/kubernetes/yaml/cdn-deploy.yaml delete mode 100644 deployment/kubernetes/yaml/cdn-svc.yaml create mode 100644 deployment/kubernetes/yaml/configure.m4 delete mode 100644 deployment/kubernetes/yaml/cpu_mem_management.cfg.m4 delete mode 100644 deployment/kubernetes/yaml/kafka-deploy.yaml delete mode 100644 deployment/kubernetes/yaml/kafka-svc.yaml delete mode 100644 deployment/kubernetes/yaml/live-0-deploy.yaml delete mode 100644 deployment/kubernetes/yaml/redis-deploy.yaml delete mode 100644 deployment/kubernetes/yaml/redis-svc.yaml delete mode 100755 deployment/kubernetes/yaml/run_with_command.py delete mode 100755 deployment/kubernetes/yaml/update_yaml.py delete mode 100644 deployment/kubernetes/yaml/video-archive-pvc.yaml delete mode 100644 deployment/kubernetes/yaml/video-cache-pvc.yaml delete mode 100644 deployment/kubernetes/yaml/vod-deploy.yaml delete mode 100755 deployment/kubernetes/yaml/yaml_utils.py delete mode 100644 deployment/kubernetes/yaml/zookeeper-deploy.yaml delete mode 100644 deployment/kubernetes/yaml/zookeeper-svc.yaml create mode 100644 script/loop.m4 diff --git a/.gitignore b/.gitignore index f503fa5..1407116 100644 --- a/.gitignore +++ b/.gitignore @@ -4,7 +4,6 @@ deployment/docker-swarm/dhparam.pem deployment/docker-swarm/self.crt deployment/docker-swarm/self.key deployment/kubernetes/*.yaml 
-deployment/kubernetes/*.cfg -deployment/kubernetes/__pycache__/ +deployment/kubernetes/yaml/*.yaml deployment/certificate/self.crt deployment/certificate/self.key diff --git a/cdn-server/Dockerfile b/cdn-server/Dockerfile index 8ecc822..86af099 100644 --- a/cdn-server/Dockerfile +++ b/cdn-server/Dockerfile @@ -1,12 +1,23 @@ FROM openvisualcloud/xeon-ubuntu1804-media-nginx:20.3.1 -Run DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends python3-setuptools python3-redis python-celery-common python3-tornado python3-kafka python3-kazoo vim openssh-server && rm -rf /var/lib/apt/lists/* +Run DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends python3-setuptools python3-redis python-celery-common python3-tornado python3-kafka python3-kazoo openssh-server && rm -rf /var/lib/apt/lists/* COPY *.xsl /etc/nginx/ COPY *.conf /etc/nginx/ -COPY html /var/www/html +COPY html /var/www/html COPY *.py /home/ CMD ["/bin/bash","-c","/home/main.py&/usr/local/sbin/nginx"] WORKDIR /home -EXPOSE 8080 + +#### +ARG UID +ARG GID +## must use ; here to ignore user exist status code +RUN [ ${GID} -gt 0 ] && groupadd -f -g ${GID} docker; \ + [ ${UID} -gt 0 ] && useradd -d /home/docker -g ${GID} -K UID_MAX=${UID} -K UID_MIN=${UID} docker; \ + touch /var/run/nginx.pid && \ + mkdir -p /var/log/nginx /var/lib/nginx /var/www/video /var/www/archive && \ + chown -R ${UID}:${GID} /var/run/nginx.pid /var/www /var/log/nginx /var/lib/nginx +USER ${UID} +#### diff --git a/cdn-server/nginx.conf b/cdn-server/nginx.conf index 64b50a1..61f8508 100644 --- a/cdn-server/nginx.conf +++ b/cdn-server/nginx.conf @@ -1,4 +1,5 @@ +user docker:docker; worker_processes auto; worker_rlimit_nofile 8192; daemon off; diff --git a/content-provider/Dockerfile b/content-provider/Dockerfile index f1f5c07..f8d1460 100644 --- a/content-provider/Dockerfile +++ b/content-provider/Dockerfile @@ -1,3 +1,13 @@ FROM ubuntu:18.04 -RUN apt-get update && 
apt-get install -y wget ffmpeg +RUN apt-get update && apt-get install -y wget ffmpeg && rm -rf /var/lib/apt/lists/* + +#### +ARG UID +ARG GID +## must use ; here to ignore user exist status code +RUN [ ${GID} -gt 0 ] && groupadd -f -g ${GID} docker; \ + [ ${UID} -gt 0 ] && useradd -d /home/docker -g ${GID} -K UID_MAX=${UID} -K UID_MIN=${UID} docker; \ + chown -R ${UID}:${GID} /home +USER ${UID} +#### diff --git a/content-provider/build.sh b/content-provider/build.sh index 62c6b63..a165774 100755 --- a/content-provider/build.sh +++ b/content-provider/build.sh @@ -8,7 +8,6 @@ clips=("$sample_video/bbb_sunflower_1080p_30fps_normal.mp4") case "$(cat /proc/1/sched | head -n 1)" in *build.sh*) cd /mnt - mkdir -p archive dash hls for clip in "${clips[@]}"; do clip_name="${clip/*\//}" clip_name="${clip_name/*=/}" @@ -27,8 +26,6 @@ case "$(cat /proc/1/sched | head -n 1)" in ;; *) mkdir -p "$DIR/../volume/video/archive" - mkdir -p "$DIR/../volume/video/dash" - mkdir -p "$DIR/../volume/video/hls" . "$DIR/../script/build.sh" . 
"$DIR/shell.sh" /home/build.sh $@ ;; diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml index b7f6d65..658d77d 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml @@ -32,7 +32,6 @@ spec: readOnly: true - mountPath: /var/www/archive name: archive - readOnly: true - mountPath: /var/www/video name: cache volumes: diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml index 1b899f1..7915d02 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml @@ -41,6 +41,15 @@ spec: "-f","flv","rtmp://cdn-service/{{ .protocol }}/media_{{ $i }}_{{ $k }}", {{- end }} "-abr_pipeline"] + lifecycle: + preStop: + exec: + command: + - rm + - -rf +{{- range $k,$v2 := .transcode }} + - ' /var/www/video/{{ .protocol }}/media_{{ $i }}_{{ $k }}' +{{- end }} name: live-service-{{ $i }} env: - name: NO_PROXY diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/video-archive-pvc.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/video-archive-pvc.yaml index 3056693..9ab2c9e 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/video-archive-pvc.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/video-archive-pvc.yaml @@ -5,7 +5,7 @@ metadata: name: video-archive spec: accessModes: - - ReadOnlyMany + - ReadWriteMany storageClassName: video-archive resources: requests: diff --git a/deployment/kubernetes/video-archive-pv.yaml.m4 b/deployment/kubernetes/video-archive-pv.yaml.m4 index 8557f8a..de31308 100644 --- a/deployment/kubernetes/video-archive-pv.yaml.m4 +++ 
b/deployment/kubernetes/video-archive-pv.yaml.m4 @@ -16,7 +16,7 @@ spec: capacity: storage: defn(`VIDEO_ARCHIVE_VOLUME_SIZE')Gi accessModes: - - ReadOnlyMany + - ReadWriteMany persistentVolumeReclaimPolicy: Retain storageClassName: video-archive local: diff --git a/deployment/kubernetes/yaml/build.sh b/deployment/kubernetes/yaml/build.sh index 1828507..4763117 100755 --- a/deployment/kubernetes/yaml/build.sh +++ b/deployment/kubernetes/yaml/build.sh @@ -4,104 +4,9 @@ DIR=$(dirname $(readlink -f "$0")) NVODS="${1:-1}" NLIVES="${2:-1}" REGISTRY="$3" - HOSTIP=$(ip route get 8.8.8.8 | awk '/ src /{split(substr($0,index($0," src ")),f);print f[2];exit}') -echo "Generating templates with NVODS=${NVODS}, NLIVES=${NLIVES}" - -find "${DIR}" -maxdepth 1 -name "*.yaml" -exec rm -rf "{}" \; -find "${DIR}" -maxdepth 1 -name "*.cfg" -exec rm -rf "{}" \; -rm -rf "$DIR/../../volume/video/hls" -rm -rf "$DIR/../../volume/video/dash" -mkdir -p "$DIR/../../volume/video/hls" -mkdir -p "$DIR/../../volume/video/dash" - -export CDN_CPU_REQUEST=2 -export CDN_MEM_REQUEST=2000Mi -export REDIS_CPU_REQUEST=1 -export REDIS_MEM_REQUEST=500Mi -export ZOOKEEPER_CPU_REQUEST=1 -export ZOOKEEPER_MEM_REQUEST=500Mi -export KAFKA_CPU_REQUEST=1 -export KAFKA_MEM_REQUEST=500Mi -export VOD_CPU_REQUEST=3 -export VOD_MEM_REQUEST=3000Mi -export LIVE_CPU_REQUEST=4 -export LIVE_MEM_REQUEST=3000Mi - -export STREAM_NAME=bbb_sunflower_1080p_30fps_normal.mp4 -export STREAM_WIDTH=856 -export STREAM_HEIGHT=480 -export STREAM_ENCODE_BITRATE=8M -export STREAM_ENCODE_FRAMERATE=30 -export STREAM_ENCODE_GOP=100 -export STREAM_ENCODE_MAXBFRAMES=2 -export STREAM_ENCODE_REFSNUM=2 -export STREAM_ENCODE_PRESET=veryfast -export STREAM_ENCODE_TYPE=AVC -export STREAM_ENCODE_HWACCEL=false -export STREAM_ENCODE_PROTOCOL=HLS -export STREAM_ENCODE_DENSITY=2 - -hosts=($(kubectl get node -l xeone3-zone!=yes -o jsonpath='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}:{range 
@.spec.taints[*]}{@.key}={@.effect};{end}{end}' | grep Ready=True | grep -v NoSchedule | cut -f1 -d':')) - -echo $hosts - -if test ${#hosts[@]} -eq 0; then - printf "\nFailed to locate worker node(s) for shared storage\n\n" - exit -1 -elif test ${#hosts[@]} -lt 2; then - hosts=(${hosts[0]} ${hosts[0]}) -fi - -export VIDEO_ARCHIVE_VOLUME_PATH=/tmp/archive/video -export VIDEO_ARCHIVE_VOLUME_SIZE=2 -export VIDEO_ARCHIVE_VOLUME_HOST=${hosts[1]} - -export VIDEO_CACHE_VOLUME_PATH=/tmp/cache/video -export VIDEO_CACHE_VOLUME_SIZE=2 -export VIDEO_CACHE_VOLUME_HOST=${hosts[1]} - -for template in $(find "${DIR}" -maxdepth 1 -name "*yaml.m4" -print); do - if [[ -n $(grep LIVEIDX "$template") ]]; then - for ((LIVEIDX=0;LIVEIDX<${NLIVES};LIVEIDX++)); do - yaml=${template/-deploy.yaml.m4/-${LIVEIDX}-deploy.yaml} - m4 -DLIVEIDX=${LIVEIDX} -DREGISTRY_PREFIX=${REGISTRY} -I "${DIR}" "${template}" > "${yaml}" - done - elif [[ -n $(grep NVODS "$template") ]] && [[ ${NVODS} -eq 0 ]]; then - continue - else - yaml=${template/.m4/} - m4 -DNVODS=${NVODS} -DHOSTIP=${HOSTIP} -DREGISTRY_PREFIX=${REGISTRY} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}" "${template}" > "${yaml}" - fi -done - -for template in $(find "${DIR}" -maxdepth 1 -name "*cfg.m4" -print); do - cfg=${template/.m4/} - m4 $(env | grep _REQUEST | sed 's/^/-D/') -I "${DIR}" "${template}" > "${cfg}" -done - -for ((LIVEIDX=0;LIVEIDX<${NLIVES};LIVEIDX++)); do - cat <> ${DIR}/cpu_mem_management.cfg - -[live-${LIVEIDX}] -cpu = ${LIVE_CPU_REQUEST} -mem = ${LIVE_MEM_REQUEST} -EOF - cat <> ${DIR}/live-transcode.cfg -[live-${LIVEIDX}] -url = ${STREAM_NAME} -width = ${STREAM_WIDTH} -height = ${STREAM_HEIGHT} -bitrate = ${STREAM_ENCODE_BITRATE} -framerate = ${STREAM_ENCODE_FRAMERATE} -gop = ${STREAM_ENCODE_GOP} -maxbFrames = ${STREAM_ENCODE_MAXBFRAMES} -refsNum = ${STREAM_ENCODE_REFSNUM} -preset = ${STREAM_ENCODE_PRESET} -encodeType = ${STREAM_ENCODE_TYPE} -hwaccel = ${STREAM_ENCODE_HWACCEL} -protocol = ${STREAM_ENCODE_PROTOCOL} 
-density = ${STREAM_ENCODE_DENSITY} -EOF +. "${DIR}/../volume-info.sh" +for template in $(find "${DIR}" -maxdepth 1 -name "*.yaml.m4" -print); do + m4 -DNVODS=${NVODS} -DNLIVES=${NLIVES} -DHOSTIP=${HOSTIP} -DREGISTRY_PREFIX=${REGISTRY} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}" "${template}" > "${template/.m4/}" done diff --git a/deployment/kubernetes/yaml/cdn-deploy.yaml b/deployment/kubernetes/yaml/cdn-deploy.yaml deleted file mode 100644 index f28a079..0000000 --- a/deployment/kubernetes/yaml/cdn-deploy.yaml +++ /dev/null @@ -1,59 +0,0 @@ - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cdn - labels: - app: cdn -spec: - replicas: 1 - selector: - matchLabels: - app: cdn - template: - metadata: - labels: - app: cdn - spec: - enableServiceLinks: false - containers: - - name: cdn - image: jfsxw001.jf.intel.com:10443/ovc_cdn_service:latest - imagePullPolicy: IfNotPresent - ports: - - containerPort: 8443 - - containerPort: 1935 - resources: - limits: - cpu: 4 - memory: 4000Mi - requests: - cpu: 2 - memory: 2000Mi - volumeMounts: - - mountPath: /var/www/archive - name: video-archive - - mountPath: /var/www/video - name: video-cache - - mountPath: /var/run/secrets - name: self-signed-certificate - readOnly: true - volumes: - - name: video-archive - persistentVolumeClaim: - claimName: video-archive - - name: video-cache - persistentVolumeClaim: - claimName: video-cache - - name: self-signed-certificate - secret: - secretName: self-signed-certificate - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "xeone3-zone" - operator: NotIn - values: - - "xeone3" diff --git a/deployment/kubernetes/yaml/cdn-deploy.yaml.m4 b/deployment/kubernetes/yaml/cdn-deploy.yaml.m4 index e573d3c..88c03fa 100644 --- a/deployment/kubernetes/yaml/cdn-deploy.yaml.m4 +++ b/deployment/kubernetes/yaml/cdn-deploy.yaml.m4 @@ -1,4 +1,6 @@ include(platform.m4) +include(configure.m4) + apiVersion: apps/v1 
kind: Deployment metadata: @@ -25,11 +27,11 @@ spec: - containerPort: 1935 resources: limits: - cpu: 4 - memory: 4000Mi + cpu: eval(defn(`CDN_CPU')*2) + memory: eval(defn(`CDN_MEMORY')*2)Mi requests: - cpu: 2 - memory: 2000Mi + cpu: defn(`CDN_CPU') + memory: defn(`CDN_MEMORY')Mi volumeMounts: - mountPath: /var/www/archive name: video-archive diff --git a/deployment/kubernetes/yaml/cdn-svc.yaml b/deployment/kubernetes/yaml/cdn-svc.yaml deleted file mode 100644 index 38794e2..0000000 --- a/deployment/kubernetes/yaml/cdn-svc.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: cdn-service - labels: - app: cdn -spec: - ports: - - port: 443 - targetPort: 8443 - name: https - - port: 1935 - protocol: TCP - name: tcp - externalIPs: - - 134.134.172.74 - selector: - app: cdn diff --git a/deployment/kubernetes/yaml/cdn-svc.yaml.m4 b/deployment/kubernetes/yaml/cdn-svc.yaml.m4 index 6d2e4c2..c285836 100644 --- a/deployment/kubernetes/yaml/cdn-svc.yaml.m4 +++ b/deployment/kubernetes/yaml/cdn-svc.yaml.m4 @@ -10,8 +10,8 @@ spec: targetPort: 8443 name: https - port: 1935 - protocol: TCP - name: tcp + targetPort: 1935 + name: rtmp externalIPs: - defn(`HOSTIP') selector: diff --git a/deployment/kubernetes/yaml/configure.m4 b/deployment/kubernetes/yaml/configure.m4 new file mode 100644 index 0000000..7bccfaf --- /dev/null +++ b/deployment/kubernetes/yaml/configure.m4 @@ -0,0 +1,44 @@ + +define(`CDN_CPU',2) +define(`CDN_MEMORY',2000) + +define(`REDIS_CPU',1) +define(`REDIS_MEMORY',500) + +define(`ZOOKEEPER_CPU',1) +define(`ZOOKEEPER_MEMORY',500) + +define(`KAFKA_CPU',1) +define(`KAFKA_MEMORY',500) + +define(`VOD_CPU',3) +define(`VOD_MEMORY',3000) + +define(`LIVE_0_CPU',4) +define(`LIVE_0_MEMORY',3000) +define(`LIVE_0_URL',bbb_sunflower_1080p_30fps_normal.mp4) + +define(`LIVE_0_0_PROTOCOL',HLS) +define(`LIVE_0_0_WIDTH',856) +define(`LIVE_0_0_HEIGHT',480) +define(`LIVE_0_0_BITRATE',8000000) +define(`LIVE_0_0_FRAMERATE',30) +define(`LIVE_0_0_GOP',100) 
+define(`LIVE_0_0_MAXBFRAMES',2) +define(`LIVE_0_0_REFSNUM',2) +define(`LIVE_0_0_PRESET',veryfast) +define(`LIVE_0_0_ENCODETYPE',libx264) +define(`LIVE_0_0_HWACCEL',false) + +define(`LIVE_0_1_PROTOCOL',DASH) +define(`LIVE_0_1_WIDTH',856) +define(`LIVE_0_1_HEIGHT',480) +define(`LIVE_0_1_BITRATE',8000000) +define(`LIVE_0_1_FRAMERATE',30) +define(`LIVE_0_1_GOP',100) +define(`LIVE_0_1_MAXBFRAMES',2) +define(`LIVE_0_1_REFSNUM',2) +define(`LIVE_0_1_PRESET',9) +define(`LIVE_0_1_ENCODETYPE',libsvt_hevc) +define(`LIVE_0_1_HWACCEL',false) + diff --git a/deployment/kubernetes/yaml/cpu_mem_management.cfg.m4 b/deployment/kubernetes/yaml/cpu_mem_management.cfg.m4 deleted file mode 100644 index 7ae4246..0000000 --- a/deployment/kubernetes/yaml/cpu_mem_management.cfg.m4 +++ /dev/null @@ -1,20 +0,0 @@ -# Minimum resource requirements on container runtime, the maximum resource requirements is 2x minimum. -[cdn] -cpu = defn(`CDN_CPU_REQUEST') -mem = defn(`CDN_MEM_REQUEST') - -[redis] -cpu = defn(`REDIS_CPU_REQUEST') -mem = defn(`REDIS_MEM_REQUEST') - -[zookeeper] -cpu = defn(`ZOOKEEPER_CPU_REQUEST') -mem = defn(`ZOOKEEPER_MEM_REQUEST') - -[kafka] -cpu = defn(`KAFKA_CPU_REQUEST') -mem = defn(`KAFKA_MEM_REQUEST') - -[vod] -cpu = defn(`VOD_CPU_REQUEST') -mem = defn(`VOD_MEM_REQUEST') diff --git a/deployment/kubernetes/yaml/kafka-deploy.yaml b/deployment/kubernetes/yaml/kafka-deploy.yaml deleted file mode 100644 index 649b460..0000000 --- a/deployment/kubernetes/yaml/kafka-deploy.yaml +++ /dev/null @@ -1,69 +0,0 @@ - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: kafka - labels: - app: kafka -spec: - replicas: 1 - selector: - matchLabels: - app: kafka - template: - metadata: - labels: - app: kafka - spec: - enableServiceLinks: false - containers: - - name: kafka - image: wurstmeister/kafka:2.12-2.4.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9092 - env: - - name: "KAFKA_BROKER_ID" - value: "1" - - name: "KAFKA_ZOOKEEPER_CONNECT" - value: 
"zookeeper-service:2181" - - name: "KAFKA_LISTENERS" - value: "PLAINTEXT://:9092" - - name: "KAFKA_ADVERTISED_LISTENERS" - value: "PLAINTEXT://kafka-service:9092" - - name: "KAFKA_LISTENER_SECURITY_PROTOCOL_MAP" - value: "PLAINTEXT:PLAINTEXT" - - name: "KAFKA_INTER_BROKER_LISTENER_NAME" - value: "PLAINTEXT" - - name: "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR" - value: "1" - - name: "KAFKA_DEFAULT_REPLICATION_FACTOR" - value: "1" - - name: "KAFKA_AUTO_CREATE_TOPICS_ENABLE" - value: "true" - - name: KAFKA_CREATE_TOPICS - value: content_provider_sched:16:1 - - name: "KAFKA_NUM_PARTITIONS" - value: "16" - - name: "KAFKA_LOG_RETENTION_MINUTES" - value: "30" - - name: "KAFKA_HEAP_OPTS" - value: "-Xmx1024m -Xms1024m" - - name: "KAFKA_LOG4J_ROOT_LOGLEVEL" - value: "ERROR" - resources: - requests: - cpu: 1 - memory: 500Mi - limits: - cpu: 2 - memory: 1000Mi - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "xeone3-zone" - operator: NotIn - values: - - "xeone3" diff --git a/deployment/kubernetes/yaml/kafka-deploy.yaml.m4 b/deployment/kubernetes/yaml/kafka-deploy.yaml.m4 index c2d6268..c0b6959 100644 --- a/deployment/kubernetes/yaml/kafka-deploy.yaml.m4 +++ b/deployment/kubernetes/yaml/kafka-deploy.yaml.m4 @@ -1,4 +1,6 @@ include(platform.m4) +include(configure.m4) + apiVersion: apps/v1 kind: Deployment metadata: @@ -48,14 +50,14 @@ spec: - name: "KAFKA_LOG_RETENTION_MINUTES" value: "30" - name: "KAFKA_HEAP_OPTS" - value: "-Xmx1024m -Xms1024m" + value: "`-Xmx'defn(`KAFKA_MEMORY')m -`Xms'defn(`KAFKA_MEMORY')m" - name: "KAFKA_LOG4J_ROOT_LOGLEVEL" value: "ERROR" resources: requests: - cpu: 1 - memory: 500Mi + cpu: defn(`KAFKA_CPU') + memory: defn(`KAFKA_MEMORY')Mi limits: - cpu: 2 - memory: 1000Mi + cpu: eval(defn(`KAFKA_CPU')*2) + memory: defn(`KAFKA_MEMORY')Mi PLATFORM_NODE_SELECTOR(`Xeon')dnl diff --git a/deployment/kubernetes/yaml/kafka-svc.yaml b/deployment/kubernetes/yaml/kafka-svc.yaml deleted 
file mode 100644 index 6adfca0..0000000 --- a/deployment/kubernetes/yaml/kafka-svc.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: kafka-service - labels: - app: kafka -spec: - ports: - - port: 9092 - protocol: TCP - selector: - app: kafka diff --git a/deployment/kubernetes/yaml/live-0-deploy.yaml b/deployment/kubernetes/yaml/live-0-deploy.yaml deleted file mode 100644 index 9a29d80..0000000 --- a/deployment/kubernetes/yaml/live-0-deploy.yaml +++ /dev/null @@ -1,56 +0,0 @@ - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: live-0 - labels: - app: live-0 -spec: - replicas: 1 - selector: - matchLabels: - app: live-0 - template: - metadata: - labels: - app: live-0 - spec: - enableServiceLinks: false - containers: - - name: live-0 - image: jfsxw001.jf.intel.com:10443/ovc_software_transcode_service:latest - imagePullPolicy: IfNotPresent - resources: - limits: - cpu: 8 - memory: 6000Mi - requests: - cpu: 4 - memory: 3000Mi - env: - - name: NO_PROXY - value: "*" - - name: no_proxy - value: "*" - volumeMounts: - - mountPath: /var/www/video - name: video-cache - - mountPath: /var/www/archive - name: video-archive - readOnly: true - volumes: - - name: video-cache - persistentVolumeClaim: - claimName: video-cache - - name: video-archive - persistentVolumeClaim: - claimName: video-archive - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "xeone3-zone" - operator: NotIn - values: - - "xeone3" diff --git a/deployment/kubernetes/yaml/live-deploy.yaml.m4 b/deployment/kubernetes/yaml/live-deploy.yaml.m4 index 981cd00..ef0b22d 100644 --- a/deployment/kubernetes/yaml/live-deploy.yaml.m4 +++ b/deployment/kubernetes/yaml/live-deploy.yaml.m4 @@ -1,48 +1,68 @@ +include(../../../script/loop.m4) +include(configure.m4) include(platform.m4) + +loopifdef(LIDX,0,``LIVE_'defn(`LIDX')_CPU',`ifelse(eval(defn(`LIDX') 0: - pods.append("vod") - pods_dict["vod"] = {} - -if 
int(NLIVES) > 0: - live_transcode_cfg = DIRS + '/live-transcode.cfg' - live_trans_cfg_dict = get_config(live_transcode_cfg) - configure_live_transcode_service(NLIVES, live_trans_cfg_dict) - -for pod in pods: - print(pod); - pods_dict = get_request_cpu(pod, pods_dict, cpu_mem_cfg_dict[pod]['cpu']) - pods_dict = get_request_mem(pod, pods_dict, cpu_mem_cfg_dict[pod]['mem']) - -update_yaml.update_yaml(sys.argv[1], pods, pods_dict, live_trans_cfg_dict) diff --git a/deployment/kubernetes/yaml/start.sh b/deployment/kubernetes/yaml/start.sh index 2425d2d..cccd4ae 100755 --- a/deployment/kubernetes/yaml/start.sh +++ b/deployment/kubernetes/yaml/start.sh @@ -1,100 +1,21 @@ #!/bin/bash -e DIR=$(dirname $(readlink -f "$0")) -export NGINX_LOG_VOLUME=$(readlink -f "/var/log/nginx") +NVODS="${2:-1}" +NLIVES="${3:-1}" +REGISTRY="$4" # Set Bash color ECHO_PREFIX_INFO="\033[1;32;40mINFO...\033[0;0m" ECHO_PREFIX_ERROR="\033[1;31;40mError...\033[0;0m" -# Try command for test command result. -function try_command { - "$@" - status=$? - if [ $status -ne 0 ]; then - echo -e $ECHO_PREFIX_ERROR "ERROR with \"$@\", Return status $status." - exit $status - fi - return $status -} - function create_secret { - kubectl create secret generic self-signed-certificate "--from-file=${DIR}/../certificate/self.crt" "--from-file=${DIR}/../certificate/self.key" + kubectl create secret generic self-signed-certificate "--from-file=${DIR}/../../certificate/self.crt" "--from-file=${DIR}/../../certificate/self.key" } -# This script must be run as root -if [[ $EUID -ne 0 ]]; then - echo -e $ECHO_PREFIX_ERROR "This script must be run as root!" 
1>&2 - exit 1 -fi - -try_command hash kubectl > /dev/null - -for i in $(find "$DIR" -maxdepth 1 -name "*svc.yaml"); do - len=$(echo $DIR | wc -m) - i1=$(echo ${i:${len}} | sed 's/-svc.yaml//') - for j in $(kubectl get svc | awk '{print $1}' | sed -n '2, $p' | grep -v 'kubernetes' | awk -F '-' '{print $1}'); do - if [ "$i1" == "$j" ]; then - kubectl delete -f "$i" - fi - done -done - -for i in $(find "$DIR" -maxdepth 1 -name "*deploy.yaml"); do - len=$(echo $DIR | wc -m) - i1=$(echo ${i:${len}} | sed 's/-deploy.yaml//') - for j in $(kubectl get pod | awk '{print $1}' | sed -n '2, $p' | awk -F '-' '{$NF=""; $(NF-1)=""; gsub(" ", "");gsub(" ", "-"); print}' | uniq); do - if [ ${i1} == ${j} ]; then - kubectl delete -f "${i}" - fi - done -done - -for i in $(find "$DIR" -maxdepth 1 -name "*pvc.yaml"); do - len=$(echo $DIR | wc -m) - i1=$(echo ${i:${len}} | sed 's/-pvc.yaml//') - for j in $(kubectl get pvc | awk '{print $1}' | sed -n '2, $p'); do - if [ ${i1} == ${j} ]; then - kubectl delete -f "${i}" - fi - done -done - -for i in $(find "$DIR" -maxdepth 1 -name "*pv.yaml"); do - len=$(echo $DIR | wc -m) - i1=$(echo ${i:${len}} | sed 's/-pv.yaml//') - for j in $(kubectl get pv | awk '{print $1}' | sed -n '2, $p'); do - if [ ${i1} == ${j} ]; then - kubectl delete -f "${i}" - fi - done -done - -for i in $(find "$DIR" -maxdepth 1 -name "*certificates.yaml"); do - len=$(echo $DIR | wc -m) - i1=$(echo ${i:${len}} | sed 's/.yaml//') - for j in $(kubectl get secret | awk '{print $1}' | sed -n '2, $p' | grep -v 'default-token'); do - if [ ${i1} == ${j} ]; then - kubectl delete -f "${i}" - fi - done -done - -rm -rf $DIR/../../volume/video/hls/* -rm -rf $DIR/../../volume/video/dash/* - -sudo mkdir -p "${NGINX_LOG_VOLUME}" - -NVODS="${2:-1}" -NLIVES="${3:-1}" -echo "Generating yamls with NVODS=${NVODS}, NLIVES=${NLIVES}" -NODES="$(kubectl get node | awk '{print $1}' | sed -n '2, $p')" -DESCRIPTIONS="$(kubectl get node --no-headers -o 
custom-columns=NAME:metadata.name,CPU:status.capacity.cpu,MEM:status.capacity.memory)" -"$DIR/run_with_command.py" "$DIR" ${NVODS} ${NLIVES} "$NODES" "$DESCRIPTIONS" - -"$DIR/../certificate/self-sign.sh" +"$DIR/../../certificate/self-sign.sh" create_secret 2>/dev/null || (kubectl delete secret self-signed-certificate; create_secret) -for i in $(find "$DIR" -maxdepth 1 -name "*.yaml"); do +for i in $(find "$DIR" "$DIR/.." -maxdepth 1 -name "*.yaml"); do kubectl apply -f "$i" done diff --git a/deployment/kubernetes/yaml/stop.sh b/deployment/kubernetes/yaml/stop.sh index 543196b..5a37ffd 100755 --- a/deployment/kubernetes/yaml/stop.sh +++ b/deployment/kubernetes/yaml/stop.sh @@ -1,4 +1,4 @@ -#!/bin/bash -e +#!/bin/bash DIR=$(dirname $(readlink -f "$0")) EXT=*.yaml @@ -18,55 +18,10 @@ function try_command { return $status } -# This script must be run as root -if [[ $EUID -ne 0 ]]; then - echo -e $ECHO_PREFIX_ERROR "This script must be run as root!" 1>&2 - exit 1 -fi - try_command hash kubectl > /dev/null -for i in $(find "$DIR" -maxdepth 1 -name "*svc.yaml"); do - len=$(echo $DIR | wc -m) - i1=$(echo ${i:${len}} | sed 's/-svc.yaml//') - for j in $(kubectl get svc | awk '{print $1}' | sed -n '2, $p' | grep -v 'kubernetes' | awk -F '-' '{print $1}'); do - if [ "$i1" == "$j" ]; then - kubectl delete -f "$i" - fi - done -done - -for i in $(find "$DIR" -maxdepth 1 -name "*deploy.yaml"); do - len=$(echo $DIR | wc -m) - i1=$(echo ${i:${len}} | sed 's/-deploy.yaml//') - for j in $(kubectl get pod | awk '{print $1}' | sed -n '2, $p' | awk -F '-' '{$NF=""; $(NF-1)=""; gsub(" ", "");gsub(" ", "-"); print}' | uniq); do - if [ ${i1} == ${j} ]; then - kubectl delete -f "${i}" - fi - done -done - -for i in $(find "$DIR" -maxdepth 1 -name "*pvc.yaml"); do - len=$(echo $DIR | wc -m) - i1=$(echo ${i:${len}} | sed 's/-pvc.yaml//') - for j in $(kubectl get pvc | awk '{print $1}' | sed -n '2, $p'); do - if [ ${i1} == ${j} ]; then - kubectl delete -f "${i}" - fi - done -done - -for i in 
$(find "$DIR" -maxdepth 1 -name "*pv.yaml"); do - len=$(echo $DIR | wc -m) - i1=$(echo ${i:${len}} | sed 's/-pv.yaml//') - for j in $(kubectl get pv | awk '{print $1}' | sed -n '2, $p'); do - if [ ${i1} == ${j} ]; then - kubectl delete -f "${i}" - fi - done +for i in $(find "$DIR" "$DIR/.." -maxdepth 1 -name "*.yaml"); do + kubectl delete --wait=false -f "$i" done kubectl delete secret self-signed-certificate 2> /dev/null || echo -n "" - -rm -rf $DIR/../../volume/video/hls/* -rm -rf $DIR/../../volume/video/dash/* diff --git a/deployment/kubernetes/yaml/update_yaml.py b/deployment/kubernetes/yaml/update_yaml.py deleted file mode 100755 index 498b2ef..0000000 --- a/deployment/kubernetes/yaml/update_yaml.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/python3 - -import os -import re -import sys -import socket -import functools -import yaml_utils - -def get_host_ip(): - try: - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - s.connect(('8.8.8.8', 80)) - host_ip = s.getsockname()[0] - finally: - s.close() - return host_ip - -def update_yaml(dir_path, pods, pods_dict,trans_cfg_dict): - host_ip = get_host_ip() - sys.path.append(dir_path) - - if re.search("live\d+", str(pods)): - print("\n\033[0;31;40mThe live video playlist URL are below:\033[0m") - for pod in pods: - limit_cpu = 2 * float(pods_dict[pod]["cpu"]) - limit_memory = str(2 * int(str(pods_dict[pod]["memory"])[0:-2])) + str(pods_dict[pod]["memory"])[-2:] - - yaml_file = os.path.join(dir_path, re.findall( - "([A-Za-z]+-*\d*$)", pod)[0] + "-deploy.yaml") - data = yaml_utils.load_yaml_file(yaml_file) - data = yaml_utils.update_resource_quotas( - data, pods_dict[pod]["cpu"], limit_cpu, pods_dict[pod]["memory"], limit_memory) - - if re.search("((vod)|(live-))\d+", pod): - yaml_file = os.path.join(dir_path, pod + "-deploy.yaml") - if re.search("live-\d", pod): - live_args = { - 'input_video': pods_dict[pod]["input"], "output_dict": {}} - if trans_cfg_dict[pod]['hwaccel'] == 'false': - if 
trans_cfg_dict[pod]['protocol'] == 'DASH': - if trans_cfg_dict[pod]['encodetype'] == 'AVC' or trans_cfg_dict[pod]['encodetype'] == 'HEVC' or trans_cfg_dict[pod]['encodetype'] == 'AV1': - codec_dict = {"AVC": "libx264", "HEVC": "libsvt_hevc", "AV1": "libsvt_av1"} - else: - print("Error: Only support AVC/HEVC/AV1! Please input correct encoder_type in transcode.cfg (" + pod + ")") - os._exit() - elif trans_cfg_dict[pod]['protocol'] == 'HLS': - if trans_cfg_dict[pod]['encodetype'] == 'AVC' or trans_cfg_dict[pod]['encodetype'] == 'HEVC': - codec_dict = {"AVC": "libx264", "HEVC": "libsvt_hevc"} - else: - print("Error: Only support AVC/HEVC! Please input correct encoder_type in transcode.cfg (" + pod + ")") - os._exit() - else: - print("Error: Please input correct protocol(HLS/DASH) in transcode.cfg (" + pod + ")") - os._exit() - elif trans_cfg_dict[pod]['hwaccel'] == 'true': - if trans_cfg_dict[pod]['encodetype'] == 'AVC' or trans_cfg_dict[pod]['encodetype'] == 'HEVC': - codec_dict = {"AVC": "h264_vaapi", "HEVC": "hevc_vaapi"} - else: - print("Error: Only support AVC/HEVC! 
Please input correct encoder_type in transcode.cfg (" + pod + ")") - os._exit() - for num in range(4): - if pods_dict[pod].get("transcode" + str(num), None) and pods_dict[pod]["transcode" + str(num)].get("protocol", None) and pods_dict[pod]["transcode" + str(num)].get("resolution", None) and pods_dict[pod]["transcode" + str(num)].get("bitrate", None) and pods_dict[pod]["transcode" + str(num)].get("codec", None) and pods_dict[pod]["transcode" + str(num)].get("output", None): - - live_args["output_dict"][pods_dict[pod]["transcode" + str(num)]["output"] + "_" + re.search("live-(\d+)", pod).group(1) + "_" + str(num)] = [pods_dict[pod]["transcode" + str(num)]["protocol"].lower(), pods_dict[pod]["transcode" + str(num)]["resolution"], pods_dict[pod]["transcode" + str(num)]["bitrate"], codec_dict[pods_dict[pod]["transcode" + str(num)]["codec"]], pods_dict[pod]["transcode" + str(num)]["framerate"], pods_dict[pod]["transcode" + str(num)]["gop"], pods_dict[pod]["transcode" + str(num)]["maxbFrames"], pods_dict[pod]["transcode" + str(num)]["refsNum"], pods_dict[pod]["transcode" + str(num)]["preset"]] - - print("\033[0;31;40mhttps://%s/%s/%s/index.%s\033[0m" % (host_ip, pods_dict[pod]["transcode" + str(num)]["protocol"].lower(), pods_dict[pod]["transcode" + str(num)] - ["output"] + "_" + re.search("live-(\d+)", pod).group(1) + "_" + str(num), "m3u8" if pods_dict[pod]["transcode" + str(num)]["protocol"].lower() == "hls" else "mpd")) - data = yaml_utils.update_command( - data, trans_cfg_dict[pod]['hwaccel'], live_args) - - yaml_utils.dump_yaml_file(data, yaml_file) diff --git a/deployment/kubernetes/yaml/video-archive-pvc.yaml b/deployment/kubernetes/yaml/video-archive-pvc.yaml deleted file mode 100644 index 5a561a5..0000000 --- a/deployment/kubernetes/yaml/video-archive-pvc.yaml +++ /dev/null @@ -1,12 +0,0 @@ - -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: video-archive -spec: - accessModes: - - ReadOnlyMany - storageClassName: video-archive - resources: - 
requests: - storage: 2Gi diff --git a/deployment/kubernetes/yaml/video-archive-pvc.yaml.m4 b/deployment/kubernetes/yaml/video-archive-pvc.yaml.m4 index f18483f..b2c065f 100644 --- a/deployment/kubernetes/yaml/video-archive-pvc.yaml.m4 +++ b/deployment/kubernetes/yaml/video-archive-pvc.yaml.m4 @@ -5,7 +5,7 @@ metadata: name: video-archive spec: accessModes: - - ReadOnlyMany + - ReadWriteMany storageClassName: video-archive resources: requests: diff --git a/deployment/kubernetes/yaml/video-cache-pvc.yaml b/deployment/kubernetes/yaml/video-cache-pvc.yaml deleted file mode 100644 index 31fbd79..0000000 --- a/deployment/kubernetes/yaml/video-cache-pvc.yaml +++ /dev/null @@ -1,12 +0,0 @@ - -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: video-cache -spec: - accessModes: - - ReadWriteMany - storageClassName: video-cache - resources: - requests: - storage: 2Gi diff --git a/deployment/kubernetes/yaml/vod-deploy.yaml b/deployment/kubernetes/yaml/vod-deploy.yaml deleted file mode 100644 index 8ca209c..0000000 --- a/deployment/kubernetes/yaml/vod-deploy.yaml +++ /dev/null @@ -1,56 +0,0 @@ - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: vod - labels: - app: vod -spec: - replicas: 2 - selector: - matchLabels: - app: vod - template: - metadata: - labels: - app: vod - spec: - enableServiceLinks: false - containers: - - name: vod - image: jfsxw001.jf.intel.com:10443/ovc_software_transcode_service:latest - imagePullPolicy: IfNotPresent - resources: - limits: - cpu: 6 - memory: 6000Mi - requests: - cpu: 3 - memory: 3000Mi - env: - - name: NO_PROXY - value: "*" - - name: no_proxy - value: "*" - volumeMounts: - - mountPath: /var/www/video - name: video-cache - - mountPath: /var/www/archive - name: video-archive - readOnly: true - volumes: - - name: video-cache - persistentVolumeClaim: - claimName: video-cache - - name: video-archive - persistentVolumeClaim: - claimName: video-archive - affinity: - nodeAffinity: - 
requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "xeone3-zone" - operator: NotIn - values: - - "xeone3" diff --git a/deployment/kubernetes/yaml/vod-deploy.yaml.m4 b/deployment/kubernetes/yaml/vod-deploy.yaml.m4 index 7977518..e131cad 100644 --- a/deployment/kubernetes/yaml/vod-deploy.yaml.m4 +++ b/deployment/kubernetes/yaml/vod-deploy.yaml.m4 @@ -1,4 +1,6 @@ include(platform.m4) +include(configure.m4) + apiVersion: apps/v1 kind: Deployment metadata: @@ -22,11 +24,11 @@ spec: imagePullPolicy: IfNotPresent resources: limits: - cpu: 6 - memory: 6000Mi + cpu: eval(defn(`VOD_CPU')*2) + memory: eval(defn(`VOD_MEMORY')*2)Mi requests: - cpu: 3 - memory: 3000Mi + cpu: defn(`VOD_CPU') + memory: defn(`VOD_MEMORY')Mi env: - name: NO_PROXY value: "*" diff --git a/deployment/kubernetes/yaml/yaml_utils.py b/deployment/kubernetes/yaml/yaml_utils.py deleted file mode 100755 index bb91f47..0000000 --- a/deployment/kubernetes/yaml/yaml_utils.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/python3 - -from ruamel import yaml - -def load_yaml_file(fileName): - with open(fileName, 'r', encoding='utf8') as infile: - data = yaml.load(infile, Loader=yaml.RoundTripLoader) - return data - -def dump_yaml_file(data, fileName): - with open(fileName, 'w', encoding='utf8') as outfile: - yaml.dump( - data, - outfile, - Dumper=yaml.RoundTripDumper, - default_flow_style=False, - allow_unicode=True) - -def update_command(data, bisHW, live_args): - data['spec']['template']['spec']['containers'][0]['lifecycle'] = { 'preStop': { 'exec': {'command': [ 'rm', 'rf' ] } } } - scale_dict = {'false': 'scale', 'true': 'scale_vaapi'} - if bisHW == "true": - command = 'ffmpeg -re -stream_loop -1 -hwaccel vaapi -hwaccel_device /dev/dri/renderD128 -hwaccel_output_format vaapi -i /var/www/archive/' + live_args['input_video'] - else: - command = 'ffmpeg -re -stream_loop -1 -i /var/www/archive/' + live_args['input_video'] - for key, value in 
live_args['output_dict'].items(): - data['spec']['template']['spec']['containers'][0]['lifecycle']['preStop']['exec']['command'].append( " /var/www/" + value[0] + '/' + key ) - thread = " -thread_count 96" if value[3].find('libsvt') != -1 else "" - command += ' -vf ' + scale_dict[bisHW] + '=' + value[1] + ' -c:v ' + value[3] + ' -b:v ' + value[2] + ' -r ' + value[4] + ' -g ' + value[5] + ' -bf ' + value[6] + ' -refs ' + value[7] + ' -preset ' + value[8] + ' -forced-idr 1' + thread + ' -an -f flv rtmp://cdn-service/' + value[0] + '/' + key - - command_caps = ['bash', '-c', command + ' -abr_pipeline'] - data['spec']['template']['spec']['containers'][0].update( - {'args': command_caps}) - return data - -def update_resource_quotas( - data, request_cpu, limit_cpu, request_memory, limit_memory): - data["spec"]["template"]["spec"]["containers"][0]["resources"]["requests"] = { - "cpu": str(int(float(request_cpu) * 1000)) + "m", - "memory": str(request_memory) - } - data["spec"]["template"]["spec"]["containers"][0]["resources"]["limits"] = { - "cpu": str(int(float(limit_cpu) * 1000)) + "m", - "memory": str(limit_memory) - } - return data diff --git a/deployment/kubernetes/yaml/zookeeper-deploy.yaml b/deployment/kubernetes/yaml/zookeeper-deploy.yaml deleted file mode 100644 index aa8bdb6..0000000 --- a/deployment/kubernetes/yaml/zookeeper-deploy.yaml +++ /dev/null @@ -1,49 +0,0 @@ - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: zookeeper - labels: - app: zookeeper -spec: - replicas: 1 - selector: - matchLabels: - app: zookeeper - template: - metadata: - labels: - app: zookeeper - spec: - enableServiceLinks: false - containers: - - name: zookeeper - image: zookeeper:3.5.6 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 2181 - env: - - name: "ZOO_TICK_TIME" - value: "10000" - - name: "ZOO_MAX_CLIENT_CNXNS" - value: "160000" - - name: "ZOO_AUTOPURGE_PURGEINTERVAL" - value: "1" - - name: "ZOO_LOG4J_PROP" - value: "ERROR" - resources: - requests: - cpu: 1 
- memory: 500Mi - limits: - cpu: 2 - memory: 1000Mi - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: "xeone3-zone" - operator: NotIn - values: - - "xeone3" diff --git a/deployment/kubernetes/yaml/zookeeper-deploy.yaml.m4 b/deployment/kubernetes/yaml/zookeeper-deploy.yaml.m4 index fd937eb..5b6668e 100644 --- a/deployment/kubernetes/yaml/zookeeper-deploy.yaml.m4 +++ b/deployment/kubernetes/yaml/zookeeper-deploy.yaml.m4 @@ -1,4 +1,6 @@ include(platform.m4) +include(configure.m4) + apiVersion: apps/v1 kind: Deployment metadata: @@ -33,9 +35,9 @@ spec: value: "ERROR" resources: requests: - cpu: 1 - memory: 500Mi + cpu: defn(`ZOOKEEPER_CPU') + memory: defn(`ZOOKEEPER_MEMORY')Mi limits: - cpu: 2 - memory: 1000Mi + cpu: eval(defn(`ZOOKEEPER_CPU')*2) + memory: eval(defn(`ZOOKEEPER_MEMORY')*2)Mi PLATFORM_NODE_SELECTOR(`Xeon')dnl diff --git a/deployment/kubernetes/yaml/zookeeper-svc.yaml b/deployment/kubernetes/yaml/zookeeper-svc.yaml deleted file mode 100644 index b89c392..0000000 --- a/deployment/kubernetes/yaml/zookeeper-svc.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: zookeeper-service - labels: - app: zookeeper -spec: - ports: - - port: 2181 - protocol: TCP - selector: - app: zookeeper diff --git a/script/loop.m4 b/script/loop.m4 new file mode 100644 index 0000000..7322fbf --- /dev/null +++ b/script/loop.m4 @@ -0,0 +1,3 @@ +define(`loop',`ifelse(eval($2<=$3),1,`pushdef(`$1',$2)$4`'loop(`$1',incr($2),$3,`$4')popdef(`$1')')')dnl +define(`loopifdef',`pushdef(`$1',$2)ifdef($3,`$4loopifdef(`$1',incr($2),`$3',`$4')')popdef(`$1')')dnl +define(`looplist',`pushdef(`$1',regexp($2,`\(\w+\)',`\1'))ifelse(regexp($2,`\w+'),-1,,`$3looplist(`$1',regexp($2,`\w+[/ ]*\(.*\)',`\1'),`$3')')popdef(`$1')')dnl diff --git a/script/nfs_setup.sh b/script/nfs_setup.sh index c276f68..c388caf 100755 --- a/script/nfs_setup.sh +++ b/script/nfs_setup.sh @@ -33,10 +33,8 @@ else fi # 
Set up NFS -try_command echo -e "$PWD/../volume/video/archive *(rw,sync,no_root_squash,no_all_squash,no_subtree_check)" > /etc/exports -try_command echo -e "$PWD/../volume/video/dash *(rw,sync,no_root_squash,no_all_squash,no_subtree_check)" >> /etc/exports -try_command echo -e "$PWD/../volume/video/hls *(rw,sync,no_root_squash,no_all_squash,no_subtree_check)" >> /etc/exports -try_command echo -e "$PWD/../volume/html *(rw,sync,no_root_squash,no_all_squash,no_subtree_check)" >> /etc/exports +try_command echo -e "$PWD/../volume/video/archive *(ro,sync,no_root_squash,no_all_squash,no_subtree_check)" > /etc/exports +try_command echo -e "$PWD/../volume/video/cache *(rw,sync,no_root_squash,no_all_squash,no_subtree_check)" >> /etc/exports try_command lsb_release -si > /dev/null diff --git a/streaming-server/Dockerfile b/streaming-server/Dockerfile index f397b65..8936362 100644 --- a/streaming-server/Dockerfile +++ b/streaming-server/Dockerfile @@ -1,10 +1,17 @@ FROM openvisualcloud/xeon-ubuntu1804-media-nginx:20.3.1 - -RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends vim && \ - rm -rf /var/lib/apt/lists/* - COPY *.conf /etc/nginx/ -CMD ["/bin/bash","-c","/usr/sbin/nginx"] +CMD ["/usr/sbin/nginx"] WORKDIR /home -EXPOSE 80 1935 + +#### +ARG UID +ARG GID +## must use ; here to ignore user exist status code +RUN [ ${GID} -gt 0 ] && groupadd -f -g ${GID} docker; \ + [ ${UID} -gt 0 ] && useradd -d /home/docker -g ${GID} -K UID_MAX=${UID} -K UID_MIN=${UID} docker; \ + touch /var/run/nginx.pid && \ + mkdir -p /var/log/nginx /var/lib/nginx /var/www/video /var/www/archive && \ + chown -R ${UID}:${GID} /var/run/nginx.pid /var/www /var/log/nginx /var/lib/nginx +USER ${UID} +#### diff --git a/xcode-server/hardware/Dockerfile b/xcode-server/hardware/Dockerfile index ea201d5..cc3caf7 100644 --- a/xcode-server/hardware/Dockerfile +++ b/xcode-server/hardware/Dockerfile @@ -1,9 +1,18 @@ FROM 
openvisualcloud/xeone3-ubuntu1804-media-ffmpeg:20.3.1 -RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends python3-tornado python3-kafka python3-kazoo vim && rm -rf /var/lib/apt/lists/* +RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends python3-tornado python3-kafka python3-kazoo && rm -rf /var/lib/apt/lists/* COPY *.py /home/ - -CMD ["/bin/bash","-c","/home/main.py"] +CMD ["/home/main.py"] WORKDIR /home + +#### +ARG UID +ARG GID +## must use ; here to ignore user exist status code +RUN [ ${GID} -gt 0 ] && groupadd -f -g ${GID} docker; \ + [ ${UID} -gt 0 ] && useradd -d /home/docker -g ${GID} -K UID_MAX=${UID} -K UID_MIN=${UID} docker; \ + chown -R ${UID}:${GID} /home +USER ${UID} +#### diff --git a/xcode-server/software/Dockerfile b/xcode-server/software/Dockerfile index ebc651c..f6c06f7 100644 --- a/xcode-server/software/Dockerfile +++ b/xcode-server/software/Dockerfile @@ -1,9 +1,16 @@ FROM openvisualcloud/xeon-ubuntu1804-media-ffmpeg:20.3.1 - -RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends python3-tornado python3-kafka python3-kazoo vim && rm -rf /var/lib/apt/lists/* - +RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends python3-tornado python3-kafka python3-kazoo && rm -rf /var/lib/apt/lists/* COPY *.py /home/ - -CMD ["/bin/bash","-c","/home/main.py"] +CMD ["/home/main.py"] WORKDIR /home + +#### +ARG UID +ARG GID +## must use ; here to ignore user exist status code +RUN [ ${GID} -gt 0 ] && groupadd -f -g ${GID} docker; \ + [ ${UID} -gt 0 ] && useradd -d /home/docker -g ${GID} -K UID_MAX=${UID} -K UID_MIN=${UID} docker; \ + chown -R ${UID}:${GID} /home +USER ${UID} +#### From 91d95246410c2f054700f15d9962e4ac11994f1b Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Mon, 25 May 2020 20:14:16 -0700 Subject: [PATCH 22/91] fix k8s --- 
README.md | 1 + deployment/kubernetes/yaml/configure.m4 | 12 ++++++------ deployment/kubernetes/yaml/stop.sh | 2 +- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 907feb8..9ef89ec 100644 --- a/README.md +++ b/README.md @@ -94,6 +94,7 @@ If you deploy the sample to a cluster, please configure the sample, as `cmake -D Start CDN transcode service ``` +make volume make start_kubernetes ``` **Tips:** [Configuration example for Kubernetes deploy](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Configuration-example-for-Kubernetes-deploy) diff --git a/deployment/kubernetes/yaml/configure.m4 b/deployment/kubernetes/yaml/configure.m4 index 7bccfaf..239ebda 100644 --- a/deployment/kubernetes/yaml/configure.m4 +++ b/deployment/kubernetes/yaml/configure.m4 @@ -11,14 +11,14 @@ define(`ZOOKEEPER_MEMORY',500) define(`KAFKA_CPU',1) define(`KAFKA_MEMORY',500) -define(`VOD_CPU',3) -define(`VOD_MEMORY',3000) +define(`VOD_CPU',1) +define(`VOD_MEMORY',2000) -define(`LIVE_0_CPU',4) -define(`LIVE_0_MEMORY',3000) +define(`LIVE_0_CPU',1) +define(`LIVE_0_MEMORY',2000) define(`LIVE_0_URL',bbb_sunflower_1080p_30fps_normal.mp4) -define(`LIVE_0_0_PROTOCOL',HLS) +define(`LIVE_0_0_PROTOCOL',hls) define(`LIVE_0_0_WIDTH',856) define(`LIVE_0_0_HEIGHT',480) define(`LIVE_0_0_BITRATE',8000000) @@ -30,7 +30,7 @@ define(`LIVE_0_0_PRESET',veryfast) define(`LIVE_0_0_ENCODETYPE',libx264) define(`LIVE_0_0_HWACCEL',false) -define(`LIVE_0_1_PROTOCOL',DASH) +define(`LIVE_0_1_PROTOCOL',dash) define(`LIVE_0_1_WIDTH',856) define(`LIVE_0_1_HEIGHT',480) define(`LIVE_0_1_BITRATE',8000000) diff --git a/deployment/kubernetes/yaml/stop.sh b/deployment/kubernetes/yaml/stop.sh index 5a37ffd..c56f74f 100755 --- a/deployment/kubernetes/yaml/stop.sh +++ b/deployment/kubernetes/yaml/stop.sh @@ -21,7 +21,7 @@ function try_command { try_command hash kubectl > /dev/null for i in $(find "$DIR" "$DIR/.." 
-maxdepth 1 -name "*.yaml"); do - kubectl delete --wait=false -f "$i" + kubectl delete --wait=false -f "$i" 2> /dev/null done kubectl delete secret self-signed-certificate 2> /dev/null || echo -n "" From 78d420dd8b736c4d7d887f3f577584f3fdeb4023 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Mon, 25 May 2020 22:07:32 -0700 Subject: [PATCH 23/91] docker swarm only --- deployment/kubernetes/build.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/deployment/kubernetes/build.sh b/deployment/kubernetes/build.sh index 2718c88..60debfa 100755 --- a/deployment/kubernetes/build.sh +++ b/deployment/kubernetes/build.sh @@ -5,6 +5,10 @@ DIR=$(dirname $(readlink -f "$0")) rm -rf "$DIR/../../volume/video/cache" mkdir -p "$DIR/../../volume/video/cache/hls" "$DIR/../../volume/video/cache/dash" +if [ ! -x /usr/bin/kubectl ] && [ ! -x /usr/local/bin/kubectl ]; then + exit 0 +fi + hosts=($(kubectl get node -l xeone3-zone!=yes -o jsonpath='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}:{range @.spec.taints[*]}{@.key}={@.effect};{end}{end}' | grep Ready=True | grep -v NoSchedule | cut -f1 -d':')) if test ${#hosts[@]} -eq 0; then From 726beb37699882865a8f89d78955134718a6433f Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Mon, 25 May 2020 22:50:24 -0700 Subject: [PATCH 24/91] guard kubectl & helm --- deployment/kubernetes/build.sh | 5 ++--- deployment/kubernetes/helm/build.sh | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/deployment/kubernetes/build.sh b/deployment/kubernetes/build.sh index 60debfa..5d63e1f 100755 --- a/deployment/kubernetes/build.sh +++ b/deployment/kubernetes/build.sh @@ -5,9 +5,8 @@ DIR=$(dirname $(readlink -f "$0")) rm -rf "$DIR/../../volume/video/cache" mkdir -p "$DIR/../../volume/video/cache/hls" "$DIR/../../volume/video/cache/dash" -if [ ! -x /usr/bin/kubectl ] && [ ! 
-x /usr/local/bin/kubectl ]; then - exit 0 -fi +# make sure kubectl is functional +kubectl get node >/dev/null 2>/dev/null || exit 0 hosts=($(kubectl get node -l xeone3-zone!=yes -o jsonpath='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}:{range @.spec.taints[*]}{@.key}={@.effect};{end}{end}' | grep Ready=True | grep -v NoSchedule | cut -f1 -d':')) diff --git a/deployment/kubernetes/helm/build.sh b/deployment/kubernetes/helm/build.sh index d835d43..0bd8545 100755 --- a/deployment/kubernetes/helm/build.sh +++ b/deployment/kubernetes/helm/build.sh @@ -6,9 +6,8 @@ NLIVES="${2:-1}" REGISTRY="$3" HOSTIP=$(ip route get 8.8.8.8 | awk '/ src /{split(substr($0,index($0," src ")),f);print f[2];exit}') -if [ ! -x /usr/bin/helm ] && [ ! -x /usr/local/bin/helm ]; then - exit 0 -fi +# make sure helm is functional +helm version >/dev/null 2>/dev/null || exit 0 echo "Generating helm chart" . "${DIR}/../volume-info.sh" From 651a109d37873126310e704bf76b5da995e17507 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Thu, 28 May 2020 18:25:35 -0700 Subject: [PATCH 25/91] fix helm & cdn user --- cdn-server/nginx.conf | 1 - deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cdn-server/nginx.conf b/cdn-server/nginx.conf index 61f8508..64b50a1 100644 --- a/cdn-server/nginx.conf +++ b/cdn-server/nginx.conf @@ -1,5 +1,4 @@ -user docker:docker; worker_processes auto; worker_rlimit_nofile 8192; daemon off; diff --git a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 index 643f127..fe950b4 100644 --- a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 +++ b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 @@ -1,5 +1,5 @@ -registryPrefix: "" +registryPrefix: "defn(`REGISTRY_PREFIX')" zookeeper: heapSize: 1024m @@ -45,3 +45,4 @@ volume: size: 
defn(`VIDEO_ARCHIVE_VOLUME_SIZE') cache: size: defn(`VIDEO_CACHE_VOLUME_SIZE') + From 90f26459ab66f17144b2682e01151732fb628f0c Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Fri, 29 May 2020 07:53:17 -0700 Subject: [PATCH 26/91] fix script to discover kubernete workers --- deployment/kubernetes/build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/kubernetes/build.sh b/deployment/kubernetes/build.sh index 5d63e1f..a1b6515 100755 --- a/deployment/kubernetes/build.sh +++ b/deployment/kubernetes/build.sh @@ -8,7 +8,7 @@ mkdir -p "$DIR/../../volume/video/cache/hls" "$DIR/../../volume/video/cache/dash # make sure kubectl is functional kubectl get node >/dev/null 2>/dev/null || exit 0 -hosts=($(kubectl get node -l xeone3-zone!=yes -o jsonpath='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}:{range @.spec.taints[*]}{@.key}={@.effect};{end}{end}' | grep Ready=True | grep -v NoSchedule | cut -f1 -d':')) +hosts=($(kubectl get node -l vcac-zone!=yes -o custom-columns=NAME:metadata.name,STATUS:status.conditions[-1].type,TAINT:spec.taints | grep " Ready " | grep -v "NoSchedule" | cut -f1 -d' ')) if test ${#hosts[@]} -eq 0; then printf "\nFailed to locate worker node(s) for shared storage\n\n" From 373e011e66c0c559ec3880eb7883a06518a3a496 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Mon, 8 Jun 2020 14:52:31 -0700 Subject: [PATCH 27/91] fix arch diagram --- README.md | 2 +- .../image => doc}/CDN-Transcode-Sample-Arch.png | Bin .../image => doc}/CDN-Transcode-Sample-Arch.vsdx | Bin 3 files changed, 1 insertion(+), 1 deletion(-) rename {cdn-server/html/image => doc}/CDN-Transcode-Sample-Arch.png (100%) rename {cdn-server/html/image => doc}/CDN-Transcode-Sample-Arch.vsdx (100%) diff --git a/README.md b/README.md index 9ef89ec..2e77ddf 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ The CDN Transcode Sample is an Open 
Visual Cloud software stack with all require The sample implements a reference server-side transcode system over CDN infrastructure, which features `live streaming` and `VOD`. - + # What's in this project The CDN Transcode Sample contains below components: diff --git a/cdn-server/html/image/CDN-Transcode-Sample-Arch.png b/doc/CDN-Transcode-Sample-Arch.png similarity index 100% rename from cdn-server/html/image/CDN-Transcode-Sample-Arch.png rename to doc/CDN-Transcode-Sample-Arch.png diff --git a/cdn-server/html/image/CDN-Transcode-Sample-Arch.vsdx b/doc/CDN-Transcode-Sample-Arch.vsdx similarity index 100% rename from cdn-server/html/image/CDN-Transcode-Sample-Arch.vsdx rename to doc/CDN-Transcode-Sample-Arch.vsdx From 19f08955c4cdca6e1d77e4d6a2be5dc3e462ac60 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Mon, 8 Jun 2020 21:07:10 -0700 Subject: [PATCH 28/91] fix nlives=0 deployment issue --- .../templates/live-service-deployment.yaml | 4 +--- .../kubernetes/yaml/cpu_mem_management.cfg | 24 ------------------- .../kubernetes/yaml/live-deploy.yaml.m4 | 6 ++--- deployment/kubernetes/yaml/live-transcode.cfg | 14 ----------- 4 files changed, 4 insertions(+), 44 deletions(-) delete mode 100644 deployment/kubernetes/yaml/cpu_mem_management.cfg delete mode 100644 deployment/kubernetes/yaml/live-transcode.cfg diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml index 7915d02..bfd8cba 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml @@ -1,6 +1,5 @@ {{- range $i,$v1 := .Values.liveTranscode.streams }} -{{- if lt (int $i) (int $.Values.liveTranscode.replicas) }} apiVersion: apps/v1 kind: Deployment @@ -12,7 +11,7 @@ spec: selector: matchLabels: app: live-service-{{ $i }} - replicas: 1 + 
replicas: {{if lt (int $i) (int $.Values.liveTranscode.replicas)}}1{{else}}0{{end}} template: metadata: creationTimestamp: null @@ -68,4 +67,3 @@ spec: --- {{- end }} -{{- end }} diff --git a/deployment/kubernetes/yaml/cpu_mem_management.cfg b/deployment/kubernetes/yaml/cpu_mem_management.cfg deleted file mode 100644 index 9406fb7..0000000 --- a/deployment/kubernetes/yaml/cpu_mem_management.cfg +++ /dev/null @@ -1,24 +0,0 @@ -# Minimum resource requirements on container runtime, the maximum resource requirements is 2x minimum. -[cdn] -cpu = 2 -mem = 2000Mi - -[redis] -cpu = 1 -mem = 500Mi - -[zookeeper] -cpu = 1 -mem = 500Mi - -[kafka] -cpu = 1 -mem = 500Mi - -[vod] -cpu = 3 -mem = 3000Mi - -[live-0] -cpu = 4 -mem = 3000Mi diff --git a/deployment/kubernetes/yaml/live-deploy.yaml.m4 b/deployment/kubernetes/yaml/live-deploy.yaml.m4 index ef0b22d..cedc44f 100644 --- a/deployment/kubernetes/yaml/live-deploy.yaml.m4 +++ b/deployment/kubernetes/yaml/live-deploy.yaml.m4 @@ -2,7 +2,7 @@ include(../../../script/loop.m4) include(configure.m4) include(platform.m4) -loopifdef(LIDX,0,``LIVE_'defn(`LIDX')_CPU',`ifelse(eval(defn(`LIDX') Date: Wed, 10 Jun 2020 01:07:44 +0800 Subject: [PATCH 29/91] one adaptation_sets for all the resolution in dash mode --- xcode-server/hardware/abr_hls_dash.py | 2 +- xcode-server/software/abr_hls_dash.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/xcode-server/hardware/abr_hls_dash.py b/xcode-server/hardware/abr_hls_dash.py index bef88a9..38f56da 100755 --- a/xcode-server/hardware/abr_hls_dash.py +++ b/xcode-server/hardware/abr_hls_dash.py @@ -55,7 +55,7 @@ def GetABRCommand(in_file, target, streaming_type, renditions=RENDITIONS_SAMPLE, cmd_static = ["-c:v", "libx264", "-profile:v", "main", "-sc_threshold", "0", "-strict", "-2"] cmd_static += ["-g", str(keyframe_interval), "-keyint_min", str(keyframe_interval)] cmd_dash = ["-use_timeline", "1", "-use_template", "1", "-seg_duration", - str(segment_target_duration)] + 
str(segment_target_duration), "-adaptation_sets", "id=0,streams=v"] cmd_hls = ["-hls_time", str(segment_target_duration), "-hls_list_size", "0"] cmd_fade_in_out = ["-an"] diff --git a/xcode-server/software/abr_hls_dash.py b/xcode-server/software/abr_hls_dash.py index bef88a9..38f56da 100755 --- a/xcode-server/software/abr_hls_dash.py +++ b/xcode-server/software/abr_hls_dash.py @@ -55,7 +55,7 @@ def GetABRCommand(in_file, target, streaming_type, renditions=RENDITIONS_SAMPLE, cmd_static = ["-c:v", "libx264", "-profile:v", "main", "-sc_threshold", "0", "-strict", "-2"] cmd_static += ["-g", str(keyframe_interval), "-keyint_min", str(keyframe_interval)] cmd_dash = ["-use_timeline", "1", "-use_template", "1", "-seg_duration", - str(segment_target_duration)] + str(segment_target_duration), "-adaptation_sets", "id=0,streams=v"] cmd_hls = ["-hls_time", str(segment_target_duration), "-hls_list_size", "0"] cmd_fade_in_out = ["-an"] From 2c591a86d60b5f9144fd8fcce9e436e2b0b035b7 Mon Sep 17 00:00:00 2001 From: Pengfei Qu Date: Wed, 10 Jun 2020 01:07:44 +0800 Subject: [PATCH 30/91] one adaptation_sets for all the resolution in dash mode --- xcode-server/hardware/abr_hls_dash.py | 2 +- xcode-server/software/abr_hls_dash.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/xcode-server/hardware/abr_hls_dash.py b/xcode-server/hardware/abr_hls_dash.py index bef88a9..38f56da 100755 --- a/xcode-server/hardware/abr_hls_dash.py +++ b/xcode-server/hardware/abr_hls_dash.py @@ -55,7 +55,7 @@ def GetABRCommand(in_file, target, streaming_type, renditions=RENDITIONS_SAMPLE, cmd_static = ["-c:v", "libx264", "-profile:v", "main", "-sc_threshold", "0", "-strict", "-2"] cmd_static += ["-g", str(keyframe_interval), "-keyint_min", str(keyframe_interval)] cmd_dash = ["-use_timeline", "1", "-use_template", "1", "-seg_duration", - str(segment_target_duration)] + str(segment_target_duration), "-adaptation_sets", "id=0,streams=v"] cmd_hls = ["-hls_time", str(segment_target_duration), 
"-hls_list_size", "0"] cmd_fade_in_out = ["-an"] diff --git a/xcode-server/software/abr_hls_dash.py b/xcode-server/software/abr_hls_dash.py index bef88a9..38f56da 100755 --- a/xcode-server/software/abr_hls_dash.py +++ b/xcode-server/software/abr_hls_dash.py @@ -55,7 +55,7 @@ def GetABRCommand(in_file, target, streaming_type, renditions=RENDITIONS_SAMPLE, cmd_static = ["-c:v", "libx264", "-profile:v", "main", "-sc_threshold", "0", "-strict", "-2"] cmd_static += ["-g", str(keyframe_interval), "-keyint_min", str(keyframe_interval)] cmd_dash = ["-use_timeline", "1", "-use_template", "1", "-seg_duration", - str(segment_target_duration)] + str(segment_target_duration), "-adaptation_sets", "id=0,streams=v"] cmd_hls = ["-hls_time", str(segment_target_duration), "-hls_list_size", "0"] cmd_fade_in_out = ["-an"] From 4b3bd576d1389342273e53de3626c619503deca9 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Wed, 10 Jun 2020 10:24:51 -0700 Subject: [PATCH 31/91] add make update for pushing images to worker nodes without private registry --- README.md | 4 +- deployment/CMakeLists.txt | 4 ++ script/scan-yaml.awk | 94 +++++++++++++++++++++++++++++++++++++++ script/update-image.sh | 80 +++++++++++++++++++++++++++++++++ 4 files changed, 181 insertions(+), 1 deletion(-) create mode 100644 script/scan-yaml.awk create mode 100755 script/update-image.sh diff --git a/README.md b/README.md index 2e77ddf..2a33a7c 100644 --- a/README.md +++ b/README.md @@ -79,7 +79,9 @@ make --- -If you deploy the sample to a cluster, please configure the sample, as `cmake -DREGISTRY= ..`, to push the sample images to the private docker registry after each build. +If you deploy the sample to a cluster, please configure the sample, as `cmake -DREGISTRY= ..`, to push the sample images to the private docker registry after each build. 
+ +To deploy without a private registry, run `make update` after each build to push the sample images to the cluster nodes (which requires passwordless access from the master node to the worker nodes.) --- diff --git a/deployment/CMakeLists.txt b/deployment/CMakeLists.txt index 162c01f..a910e81 100644 --- a/deployment/CMakeLists.txt +++ b/deployment/CMakeLists.txt @@ -1 +1,5 @@ include("${CMAKE_SOURCE_DIR}/script/scan-all.cmake") + +if(NOT REGISTRY) + add_custom_target(update ${CMAKE_HOME_DIRECTORY}/script/update-image.sh) +endif() diff --git a/script/scan-yaml.awk b/script/scan-yaml.awk new file mode 100644 index 0000000..56e36fc --- /dev/null +++ b/script/scan-yaml.awk @@ -0,0 +1,94 @@ +#!/usr/bin/awk + +BEGIN { + im=""; + n_space=c_space=0; + matched=1; +} + +function saveim() { + split(im,ims,","); + for (i in ims) { + if (ims[i]!="" && (matched || labels=="*")) { + images[ims[i]]=1; + } + } + im=""; + matched=1; +} + +/containers:/ { + c_space=index($0,"containers:"); +} + +/initContainers:/ { + c_space=index($0,"initContainers:"); +} + +/image:/ && c_space==0 { + saveim(); + im=$2; +} + +/image:/ && c_space>0 { + im=im","$2 +} + +/VCAC_IMAGE:/ { + im=im","$2 +} + +/- node\..*==.*/ && labels!="*" { + gsub(/[\" ]/,"",$2); + if (index(labels,$2)==0) { + im=""; + matched=0; + } +} + +/- node\..*!=.*/ && labels!="*" { + gsub(/[\" ]/,"",$2); + gsub(/!=/,"==",$2); + if (index(labels,$2)!=0) { + im=""; + matched=0; + } +} + +/^\s*---\s*$/ || /^\s*$/ { + n_space=c_space=0; + saveim(); +} + +/- key:/ && n_space>0 { + match($0, /^ */); + if (RLENGTH > n_space) key=$3 +} + +/operator:/ && n_space>0 { + match($0, /^ */); + if (RLENGTH > n_space) operator=$2 +} + +/- ".*"/ && n_space>0 { + match($0, /^ */); + if (RLENGTH > n_space) { + label_eqn=key":"$2 + gsub(/[\" ]/,"",label_eqn); + i=index(labels,label_eqn); + if ((operator=="In" && i==0) || (operator=="NotIn" && i!=0)) { + im=im2=""; + matched=0; + } + } +} + +/nodeAffinity:/ { + n_space=index($0,"nodeAffinity:"); +} 
+ +END { + saveim(); + for (im in images) + print(im); +} diff --git a/script/update-image.sh b/script/update-image.sh new file mode 100755 index 0000000..c5dd4f5 --- /dev/null +++ b/script/update-image.sh @@ -0,0 +1,80 @@ +#!/bin/bash -e + +function transfer_image { + image="$1" + nodeid="$2" + nodeip="$3" + + # overwrite vcac username + case "$4" in + *vcac-zone:yes*|*vcac_zone==yes*) + worker="root@$nodeip";; + *) + worker="$nodeip";; + esac + + echo "Update image: $image to $worker" + sig1=$((docker image inspect -f {{.ID}} $image || ((docker pull $image 1>&2) && docker image inspect -f {{.ID}} $image)) | grep .) + echo " local: $sig1" + + hostfile="$HOME/.vcac-hosts" + if [ ! -f "$hostfile" ]; then hostfile="/etc/vcac-hosts"; fi + host=$(awk -v node="$nodeid/$nodeip" '$1==node{print$2}' "$hostfile" 2>/dev/null || true) + if [ -z "$host" ]; then host=$(hostname); fi + + CONNECTION_TIMEOUT=1 + case "$(hostname -f)" in + $host | $host.*) # direct access + sig2=$(ssh -o ConnectTimeout=$CONNECTION_TIMEOUT $worker "docker image inspect -f {{.ID}} $image 2> /dev/null || echo" || true) + echo "remote: $sig2" + + if test "$sig1" != "$sig2"; then + echo "Transfering image..." + (docker save $image | ssh -o ConnectTimeout=$CONNECTION_TIMEOUT $worker "docker image rm -f $image 2>/dev/null; docker load") || true + fi;; + *) # access via jump host + sig2=$(ssh -o ConnectTimeout=$CONNECTION_TIMEOUT $host "ssh -o ConnectTimeout=$CONNECTION_TIMEOUT $worker \"docker image inspect -f {{.ID}} $image 2> /dev/null || echo\"" || true) + echo "remote: $sig2" + + if test "$sig1" != "$sig2"; then + echo "Transfering image..." + (docker save $image | ssh -o ConnectTimeout=$CONNECTION_TIMEOUT $host "ssh -o ConnectTimeout=$CONNECTION_TIMEOUT $worker \"docker image rm -f $image 2>/dev/null; docker load\"") || true + fi;; + esac + echo "" +} + +DIR=$(dirname $(readlink -f "$0")) +docker node ls > /dev/null 2> /dev/null && ( + echo "Updating docker-swarm nodes..." 
+ for id in $(docker node ls -q 2> /dev/null); do + ready="$(docker node inspect -f {{.Status.State}} $id)" + active="$(docker node inspect -f {{.Spec.Availability}} $id)" + nodeip="$(docker node inspect -f {{.Status.Addr}} $id)" + labels="$(docker node inspect -f {{.Spec.Labels}} $id | sed 's/map\[/node.labels./' | sed 's/\]$//' | sed 's/ / node.labels./g' | sed 's/:/==/g')" + role="$(docker node inspect -f {{.Spec.Role}} $id)" + + if test "$ready" = "ready"; then + if test "$active" = "active"; then + # skip unavailable or manager node + if test -z "$(hostname -I | grep --fixed-strings $nodeip)"; then + for image in $(awk -v labels="$labels node.role=${role}" -f "$DIR/scan-yaml.awk" "${DIR}/../deployment/docker-swarm/docker-compose.yml"); do + transfer_image $image "$id" "$nodeip" "$labels" + done + fi + fi + fi + done +) || echo -n "" + +kubectl get node >/dev/null 2>/dev/null && ( + echo "Updating Kubernetes nodes..." + for id in $(kubectl get nodes --selector='!node-role.kubernetes.io/master' 2> /dev/null | grep ' Ready ' | cut -f1 -d' '); do + nodeip="$(kubectl describe node $id | grep InternalIP | sed -E 's/[^0-9]+([0-9.]+)$/\1/')" + labels="$(kubectl describe node $id | awk '/Annotations:/{lf=0}/Labels:/{sub("Labels:","",$0);lf=1}lf==1{sub("=",":",$1);print$1}')" + + for image in $(awk -v labels="$labels" -f "$DIR/scan-yaml.awk" "${DIR}/../deployment/kubernetes/yaml"/*.yaml); do + transfer_image $image "$id" "$nodeip" "$labels" + done + done +) || echo -n "" From 74390115e572bed7a97efa8e8a81fc4df434b3e8 Mon Sep 17 00:00:00 2001 From: Pengfei Qu Date: Fri, 19 Jun 2020 23:31:55 +0800 Subject: [PATCH 32/91] fix to scripte mkvolume.sh --- deployment/kubernetes/mkvolume.sh | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/deployment/kubernetes/mkvolume.sh b/deployment/kubernetes/mkvolume.sh index 91d2092..86b440c 100755 --- a/deployment/kubernetes/mkvolume.sh +++ b/deployment/kubernetes/mkvolume.sh @@ -21,15 +21,17 @@ BEGIN{ } /- 
".*"/ { host=host2ip[substr($2,2,length($2)-2)]; - paths[host][path]=1; - contents[host][path]=content + paths[host,path]=1; + contents[host,path]=content } END { - for (host in paths) { - for (path in paths[host]) { - system("ssh "host" \"mkdir -p "path";find "path" -mindepth 1 -maxdepth 1 -exec rm -rf {} \\\\;\""); - system("scp -r "contents[host][path]"/* "host":"path); - } + for (item in paths) { + split(item,tmp,SUBSEP); + host=tmp[1] + path=tmp[2]; + print host, path; + system("ssh "host" \"mkdir -p "path";find "path" -mindepth 1 -maxdepth 1 -exec rm -rf {} \\\\;\""); + system("scp -r "contents[host,path]"/* "host":"path); } } ' "$DIR"/*-pv.yaml From 539b711fa12a805d2d0ead24f9a5a61b0bbe1a3f Mon Sep 17 00:00:00 2001 From: Pengfei Qu Date: Fri, 19 Jun 2020 23:31:55 +0800 Subject: [PATCH 33/91] fix to scripte mkvolume.sh --- deployment/kubernetes/mkvolume.sh | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/deployment/kubernetes/mkvolume.sh b/deployment/kubernetes/mkvolume.sh index 91d2092..86b440c 100755 --- a/deployment/kubernetes/mkvolume.sh +++ b/deployment/kubernetes/mkvolume.sh @@ -21,15 +21,17 @@ BEGIN{ } /- ".*"/ { host=host2ip[substr($2,2,length($2)-2)]; - paths[host][path]=1; - contents[host][path]=content + paths[host,path]=1; + contents[host,path]=content } END { - for (host in paths) { - for (path in paths[host]) { - system("ssh "host" \"mkdir -p "path";find "path" -mindepth 1 -maxdepth 1 -exec rm -rf {} \\\\;\""); - system("scp -r "contents[host][path]"/* "host":"path); - } + for (item in paths) { + split(item,tmp,SUBSEP); + host=tmp[1] + path=tmp[2]; + print host, path; + system("ssh "host" \"mkdir -p "path";find "path" -mindepth 1 -maxdepth 1 -exec rm -rf {} \\\\;\""); + system("scp -r "contents[host,path]"/* "host":"path); } } ' "$DIR"/*-pv.yaml From 75e467c5f5ee913f36f22c8e97de611c24826f54 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Mon, 3 Aug 2020 19:41:44 -0700 
Subject: [PATCH 34/91] run CDN as non-root --- deployment/docker-swarm/docker-compose.yml.m4 | 4 +++- .../templates/kafka-service-deployment.yaml | 4 +++- .../templates/redis-service-deployment.yaml | 3 +++ .../templates/zookeeper-service-deployment.yaml | 3 +++ deployment/kubernetes/yaml/kafka-deploy.yaml.m4 | 2 +- deployment/kubernetes/yaml/redis-deploy.yaml.m4 | 2 ++ deployment/kubernetes/yaml/zookeeper-deploy.yaml.m4 | 2 ++ kafka/CMakeLists.txt | 2 ++ kafka/Dockerfile | 11 +++++++++++ kafka/build.sh | 6 ++++++ kafka/shell.sh | 6 ++++++ 11 files changed, 42 insertions(+), 3 deletions(-) create mode 100644 kafka/CMakeLists.txt create mode 100644 kafka/Dockerfile create mode 100755 kafka/build.sh create mode 100755 kafka/shell.sh diff --git a/deployment/docker-swarm/docker-compose.yml.m4 b/deployment/docker-swarm/docker-compose.yml.m4 index 338e4a1..74340c5 100644 --- a/deployment/docker-swarm/docker-compose.yml.m4 +++ b/deployment/docker-swarm/docker-compose.yml.m4 @@ -7,6 +7,7 @@ services: restart: always deploy: replicas: 1 + user: redis command: redis-server @@ -20,12 +21,13 @@ services: ZOOKEEPER_MAX_CLIENT_CNXNS: '20000' ZOOKEEPER_LOG4J_LOGGERS: 'zookeepr=ERROR' ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: 'ERROR' + user: zookeeper restart: always deploy: replicas: 1 kafka-service: - image: wurstmeister/kafka:latest + image: defn(`REGISTRY_PREFIX')ovc_kafka_service:latest depends_on: - zookeeper-service environment: diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-deployment.yaml index e7ddb3e..072c3a0 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-deployment.yaml @@ -48,8 +48,10 @@ spec: value: "1" - name: KAFKA_ZOOKEEPER_CONNECT value: zookeeper-service:2181 - image: wurstmeister/kafka:latest + image: {{ $.Values.registryPrefix 
}}ovc_kafka_service:latest + imagePullPolicy: IfNotPresent name: kafka-service ports: - containerPort: 9092 restartPolicy: Always + diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/redis-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/redis-service-deployment.yaml index ce7efa0..2377064 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/redis-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/redis-service-deployment.yaml @@ -19,7 +19,10 @@ spec: - args: - redis-server image: redis:latest + imagePullPolicy: IfNotPresent name: redis-service ports: - containerPort: 6379 + securityContext: + runAsUser: 999 restartPolicy: Always diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-deployment.yaml index 4919bf5..f17e857 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-deployment.yaml @@ -32,7 +32,10 @@ spec: - name: ZOOKEEPER_TICK_TIME value: "2000" image: zookeeper:latest + imagePullPolicy: IfNotPresent name: zookeeper-service ports: - containerPort: 2181 + securityContext: + runAsUser: 1000 restartPolicy: Always diff --git a/deployment/kubernetes/yaml/kafka-deploy.yaml.m4 b/deployment/kubernetes/yaml/kafka-deploy.yaml.m4 index c0b6959..cd8e2f7 100644 --- a/deployment/kubernetes/yaml/kafka-deploy.yaml.m4 +++ b/deployment/kubernetes/yaml/kafka-deploy.yaml.m4 @@ -20,7 +20,7 @@ spec: enableServiceLinks: false containers: - name: kafka - image: wurstmeister/kafka:2.12-2.4.0 + image: defn(`REGISTRY_PREFIX')ovc_kafka_service:latest imagePullPolicy: IfNotPresent ports: - containerPort: 9092 diff --git a/deployment/kubernetes/yaml/redis-deploy.yaml.m4 b/deployment/kubernetes/yaml/redis-deploy.yaml.m4 index 85e8208..963043e 100644 --- 
a/deployment/kubernetes/yaml/redis-deploy.yaml.m4 +++ b/deployment/kubernetes/yaml/redis-deploy.yaml.m4 @@ -25,6 +25,8 @@ spec: - redis-server ports: - containerPort: 6379 + securityContext: + runAsUser: 999 resources: requests: cpu: defn(`REDIS_CPU') diff --git a/deployment/kubernetes/yaml/zookeeper-deploy.yaml.m4 b/deployment/kubernetes/yaml/zookeeper-deploy.yaml.m4 index 5b6668e..d270804 100644 --- a/deployment/kubernetes/yaml/zookeeper-deploy.yaml.m4 +++ b/deployment/kubernetes/yaml/zookeeper-deploy.yaml.m4 @@ -33,6 +33,8 @@ spec: value: "1" - name: "ZOO_LOG4J_PROP" value: "ERROR" + securityContext: + runAsUser: 1000 resources: requests: cpu: defn(`ZOOKEEPER_CPU') diff --git a/kafka/CMakeLists.txt b/kafka/CMakeLists.txt new file mode 100644 index 0000000..5f53cc3 --- /dev/null +++ b/kafka/CMakeLists.txt @@ -0,0 +1,2 @@ +set(service "ovc_kafka_service") +include("${CMAKE_SOURCE_DIR}/script/service.cmake") diff --git a/kafka/Dockerfile b/kafka/Dockerfile new file mode 100644 index 0000000..095d9ad --- /dev/null +++ b/kafka/Dockerfile @@ -0,0 +1,11 @@ + +FROM wurstmeister/kafka:2.12-2.4.0 + +RUN sed -i 's/\/kafka\/kafka/\/opt\/kafka\/logs\/kafka/' /usr/bin/start-kafka.sh && \ + mkdir /opt/kafka/logs + +RUN addgroup kafka && \ + adduser -D -H -G kafka kafka && \ + chown -R kafka:kafka /opt /kafka + +USER kafka diff --git a/kafka/build.sh b/kafka/build.sh new file mode 100755 index 0000000..71bc770 --- /dev/null +++ b/kafka/build.sh @@ -0,0 +1,6 @@ +#!/bin/bash -e + +IMAGE="ovc_kafka_service" +DIR=$(dirname $(readlink -f "$0")) + +. "${DIR}/../script/build.sh" diff --git a/kafka/shell.sh b/kafka/shell.sh new file mode 100755 index 0000000..3b8b415 --- /dev/null +++ b/kafka/shell.sh @@ -0,0 +1,6 @@ +#!/bin/bash -e + +IMAGE="ovc_kafka_service" +DIR=$(dirname $(readlink -f "$0")) + +. 
"${DIR}/../script/shell.sh" From 9b040bd552fbeb35f9faea368f382fa18ce0527f Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Tue, 4 Aug 2020 09:13:00 -0700 Subject: [PATCH 35/91] change kafka to use regular user uid/gid --- kafka/Dockerfile | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/kafka/Dockerfile b/kafka/Dockerfile index 095d9ad..0852add 100644 --- a/kafka/Dockerfile +++ b/kafka/Dockerfile @@ -4,8 +4,10 @@ FROM wurstmeister/kafka:2.12-2.4.0 RUN sed -i 's/\/kafka\/kafka/\/opt\/kafka\/logs\/kafka/' /usr/bin/start-kafka.sh && \ mkdir /opt/kafka/logs -RUN addgroup kafka && \ - adduser -D -H -G kafka kafka && \ +ARG UID +ARG GID +RUN addgroup -g ${GID} kafka && \ + adduser -D -H -G kafka -g ${GID} -u ${UID} kafka && \ chown -R kafka:kafka /opt /kafka -USER kafka +USER ${UID} From 24048bef2935ac8846186f27cce6bd48ae11c215 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Thu, 6 Aug 2020 07:34:30 -0700 Subject: [PATCH 36/91] Revert "change kafka to use regular user uid/gid" This reverts commit 9b040bd552fbeb35f9faea368f382fa18ce0527f. 
--- kafka/Dockerfile | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/kafka/Dockerfile b/kafka/Dockerfile index 0852add..095d9ad 100644 --- a/kafka/Dockerfile +++ b/kafka/Dockerfile @@ -4,10 +4,8 @@ FROM wurstmeister/kafka:2.12-2.4.0 RUN sed -i 's/\/kafka\/kafka/\/opt\/kafka\/logs\/kafka/' /usr/bin/start-kafka.sh && \ mkdir /opt/kafka/logs -ARG UID -ARG GID -RUN addgroup -g ${GID} kafka && \ - adduser -D -H -G kafka -g ${GID} -u ${UID} kafka && \ +RUN addgroup kafka && \ + adduser -D -H -G kafka kafka && \ chown -R kafka:kafka /opt /kafka -USER ${UID} +USER kafka From 4fd8efb7d85195cb0f139ca2ea4f0835c89917b7 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Thu, 6 Aug 2020 07:40:01 -0700 Subject: [PATCH 37/91] add runAsUser --- deployment/docker-swarm/docker-compose.yml.m4 | 1 + .../helm/cdn-transcode/templates/kafka-service-deployment.yaml | 2 ++ deployment/kubernetes/yaml/kafka-deploy.yaml.m4 | 2 ++ 3 files changed, 5 insertions(+) diff --git a/deployment/docker-swarm/docker-compose.yml.m4 b/deployment/docker-swarm/docker-compose.yml.m4 index 74340c5..a229165 100644 --- a/deployment/docker-swarm/docker-compose.yml.m4 +++ b/deployment/docker-swarm/docker-compose.yml.m4 @@ -47,6 +47,7 @@ services: KAFKA_HEAP_OPTS: '-Xmx1024m -Xms1024m' KAFKA_LOG4J_LOGGERS: 'kafka=ERROR,kafka.controller=ERROR,state.change.logger=ERROR,org.apache.kafka=ERROR' KAFKA_LOG4J_ROOT_LOGLEVEL: 'ERROR' + user: kafka restart: always deploy: replicas: 1 diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-deployment.yaml index 072c3a0..6a46b1c 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-deployment.yaml @@ -53,5 +53,7 @@ spec: name: kafka-service ports: - containerPort: 9092 + securityContext: + runAsUser: 
1000 restartPolicy: Always diff --git a/deployment/kubernetes/yaml/kafka-deploy.yaml.m4 b/deployment/kubernetes/yaml/kafka-deploy.yaml.m4 index cd8e2f7..a860bdb 100644 --- a/deployment/kubernetes/yaml/kafka-deploy.yaml.m4 +++ b/deployment/kubernetes/yaml/kafka-deploy.yaml.m4 @@ -53,6 +53,8 @@ spec: value: "`-Xmx'defn(`KAFKA_MEMORY')m -`Xms'defn(`KAFKA_MEMORY')m" - name: "KAFKA_LOG4J_ROOT_LOGLEVEL" value: "ERROR" + securityContext: + runAsUser: 1000 resources: requests: cpu: defn(`KAFKA_CPU') From e887cfc2488c1ad9da0051d9b26dc4b1975a1aba Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Thu, 6 Aug 2020 15:58:34 -0700 Subject: [PATCH 38/91] upgrade to v20.7 --- cdn-server/Dockerfile | 2 +- streaming-server/Dockerfile | 2 +- xcode-server/hardware/Dockerfile | 2 +- xcode-server/software/Dockerfile | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cdn-server/Dockerfile b/cdn-server/Dockerfile index 86af099..eb3c9bd 100644 --- a/cdn-server/Dockerfile +++ b/cdn-server/Dockerfile @@ -1,5 +1,5 @@ -FROM openvisualcloud/xeon-ubuntu1804-media-nginx:20.3.1 +FROM openvisualcloud/xeon-ubuntu1804-media-nginx:20.7 Run DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends python3-setuptools python3-redis python-celery-common python3-tornado python3-kafka python3-kazoo openssh-server && rm -rf /var/lib/apt/lists/* diff --git a/streaming-server/Dockerfile b/streaming-server/Dockerfile index 8936362..41978ca 100644 --- a/streaming-server/Dockerfile +++ b/streaming-server/Dockerfile @@ -1,5 +1,5 @@ -FROM openvisualcloud/xeon-ubuntu1804-media-nginx:20.3.1 +FROM openvisualcloud/xeon-ubuntu1804-media-nginx:20.7 COPY *.conf /etc/nginx/ CMD ["/usr/sbin/nginx"] WORKDIR /home diff --git a/xcode-server/hardware/Dockerfile b/xcode-server/hardware/Dockerfile index cc3caf7..db2336c 100644 --- a/xcode-server/hardware/Dockerfile +++ b/xcode-server/hardware/Dockerfile @@ -1,5 +1,5 @@ -FROM 
openvisualcloud/xeone3-ubuntu1804-media-ffmpeg:20.3.1 +FROM openvisualcloud/xeone3-ubuntu1804-media-ffmpeg:20.7 RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends python3-tornado python3-kafka python3-kazoo && rm -rf /var/lib/apt/lists/* diff --git a/xcode-server/software/Dockerfile b/xcode-server/software/Dockerfile index f6c06f7..b60aac2 100644 --- a/xcode-server/software/Dockerfile +++ b/xcode-server/software/Dockerfile @@ -1,5 +1,5 @@ -FROM openvisualcloud/xeon-ubuntu1804-media-ffmpeg:20.3.1 +FROM openvisualcloud/xeon-ubuntu1804-media-ffmpeg:20.7 RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends python3-tornado python3-kafka python3-kazoo && rm -rf /var/lib/apt/lists/* COPY *.py /home/ CMD ["/home/main.py"] From 5621e074d9da6582dd210d4eae88e08a83942943 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Thu, 8 Oct 2020 15:16:47 -0700 Subject: [PATCH 39/91] update README --- README.md | 28 +++++----------------------- 1 file changed, 5 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md index 2a33a7c..57bff3e 100644 --- a/README.md +++ b/README.md @@ -5,24 +5,6 @@ [![License](https://img.shields.io/badge/license-BSD_3_Clause-green.svg)](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/blob/master/LICENSE) [![Contributions](https://img.shields.io/badge/contributions-welcome-blue.svg)](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki) -Table of Contents -================= - * [Open Visual Cloud CDN Transcode Sample](#open-visual-cloud-cdn-transcode-sample) - * [Architecture](#architecture) - * [What's in this project](#whats-in-this-project) - * [System requirements](#system-requirements) - * [Operating system](#operating-system) - * [How to setup The CDN Transcode Sample](#how-to-setup-the-cdn-transcode-sample) - * [Setup the CDN Transcode Sample OS environment(Both of master and slave 
nodes)](#setup-the-cdn-transcode-sample-os-environmentboth-of-master-and-slave-nodes) - * [Install ubuntu18.04.2/CentOS 7.6](#install-ubuntu18042centos-76) - * [Setup CDN environment(Both of master and slave nodes)](#setup-cdn-environmentboth-of-master-and-slave-nodes) - * [Install the third-party dependency Libraries and tools](#install-the-third-party-dependency-libraries-and-tools) - * [Setup docker proxy as follows if you are behind a firewall](#setup-docker-proxy-as-follows-if-you-are-behind-a-firewall) - * [Build(Both of master and slave nodes)](#buildboth-of-master-and-slave-nodes) - * [Deploy](#deploy) - * [Auto deployment using Kubernetes](#auto-deployment-using-kubernetes) - * [See Also](#see-also) - The CDN Transcode Sample is an Open Visual Cloud software stack with all required open source ingredients well integrated to provide out-of-box CDN media transcode service, including live streaming and video on demand. It also provides Docker-based media delivery software development environment upon which developer can easily build their specific applications. # Architecture @@ -48,13 +30,13 @@ The CDN Transcode Sample may run on Linux* 64 bit operating systems. The list be - CentOS* 7.6 # How to setup The CDN Transcode Sample -## Setup the CDN Transcode Sample OS environment(Both of master and slave nodes) +## Setup the CDN Transcode Sample OS environment Install Ubuntu 18.04.2/CentOS 7.6 on CDN-Transcode Server, and configure the IP address & proxy properly. 
### Install ubuntu18.04.2/CentOS 7.6 - [Download Ubuntu and Install](https://ubuntu.com/download) - [Download CentOS and install](https://www.centos.org/download/) -## Setup CDN environment(Both of master and slave nodes) +## Setup CDN environment ### Install the third-party dependency Libraries and tools ``` sudo -E ./script/install_dependency.sh @@ -66,7 +48,7 @@ printf "[Service]\nEnvironment=\"HTTPS_PROXY=$https_proxy\" \"NO_PROXY=$no_proxy sudo systemctl daemon-reload sudo systemctl restart docker ``` -## Build(Both of master and slave nodes) +## Build Run below commands to build docker images ``` @@ -88,10 +70,10 @@ To deploy without a private registry, run `make update` after each build to push ## Deploy ### Auto deployment using Kubernetes -**Tips:** It divides into two parts: master or slave ones +**Tips:** It divides into two parts: - [Setup Kubernetes master environment for CentOS](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-master-environment-for-CentOS) - [Setup Kubernetes master environment for Ubuntu](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-master-environment-for-Ubuntu) -- [Setup Kubernetes slave environment](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-slave-environment) +- [Setup Kubernetes worker environment](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-worker-environment) - [Setup NFS environment](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-NFS-environment) Start CDN transcode service From 2cc83aae1926f3ceffc5e242034eed1c3ef8de08 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Thu, 8 Oct 2020 15:21:03 -0700 Subject: [PATCH 40/91] update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 57bff3e..17c2cb5 100644 --- a/README.md +++ b/README.md @@ -73,7 +73,7 @@ To deploy without a 
private registry, run `make update` after each build to push **Tips:** It divides into two parts: - [Setup Kubernetes master environment for CentOS](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-master-environment-for-CentOS) - [Setup Kubernetes master environment for Ubuntu](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-master-environment-for-Ubuntu) -- [Setup Kubernetes worker environment](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-worker-environment) +- [Setup Kubernetes worker environment](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-Worker-Environment) - [Setup NFS environment](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-NFS-environment) Start CDN transcode service From a7ef410f1a9097280364da8cbc39ddd90263c9db Mon Sep 17 00:00:00 2001 From: xwu2 <3871873+xwu2git@users.noreply.github.com> Date: Thu, 15 Oct 2020 14:06:02 -0700 Subject: [PATCH 41/91] Update README.md --- README.md | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index 17c2cb5..1eaa0d8 100644 --- a/README.md +++ b/README.md @@ -13,16 +13,6 @@ The sample implements a reference server-side transcode system over CDN infrastr -# What's in this project -The CDN Transcode Sample contains below components: -- Dockerfiles -- Python web services source code -- BASH glue scripts -- HTML web pages -- CMakefiles -- Configuration files -- Documents - # System requirements ## Operating system The CDN Transcode Sample may run on Linux* 64 bit operating systems. The list below represents the operating systems that the transcode application and library were tested and validated on: @@ -30,11 +20,6 @@ The CDN Transcode Sample may run on Linux* 64 bit operating systems. 
The list be - CentOS* 7.6 # How to setup The CDN Transcode Sample -## Setup the CDN Transcode Sample OS environment -Install Ubuntu 18.04.2/CentOS 7.6 on CDN-Transcode Server, and configure the IP address & proxy properly. -### Install ubuntu18.04.2/CentOS 7.6 -- [Download Ubuntu and Install](https://ubuntu.com/download) -- [Download CentOS and install](https://www.centos.org/download/) ## Setup CDN environment ### Install the third-party dependency Libraries and tools @@ -71,9 +56,8 @@ To deploy without a private registry, run `make update` after each build to push ### Auto deployment using Kubernetes **Tips:** It divides into two parts: -- [Setup Kubernetes master environment for CentOS](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-master-environment-for-CentOS) -- [Setup Kubernetes master environment for Ubuntu](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-master-environment-for-Ubuntu) -- [Setup Kubernetes worker environment](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-Worker-Environment) +- [Setup Kubernetes for CentOS](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-for-CentOS) +- [Setup Kubernetes for Ubuntu](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-for-Ubuntu) - [Setup NFS environment](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-NFS-environment) Start CDN transcode service From d6ad4ab397d3f3cb5146ce2c6c4f8a4f34720f4f Mon Sep 17 00:00:00 2001 From: Pengfei Qu Date: Wed, 16 Dec 2020 10:34:11 -0500 Subject: [PATCH 42/91] add batch service to trigger transcoding only --- CMakeLists.txt | 11 ++- batch/.dockerignore | 3 + batch/CMakeLists.txt | 2 + batch/Dockerfile | 22 ++++++ batch/build.sh | 6 ++ batch/main.py | 43 +++++++++++ batch/messaging.py | 77 +++++++++++++++++++ batch/nginx.conf | 46 +++++++++++ batch/shell.sh | 7 ++ batch/transcoding.json | 54 +++++++++++++ 
cdn-server/schedule.py | 16 +++- deployment/docker-swarm/build.sh | 5 +- deployment/kubernetes/helm/build.sh | 5 +- deployment/kubernetes/yaml/build.sh | 6 +- deployment/kubernetes/yaml/cdn-deploy.yaml.m4 | 5 +- deployment/kubernetes/yaml/cdn-svc.yaml.m4 | 4 + deployment/kubernetes/yaml/start.sh | 5 +- script/build.sh | 2 +- script/deployment.cmake | 2 +- script/service.cmake | 2 +- xcode-server/hardware/abr_hls_dash.py | 8 +- xcode-server/hardware/main.py | 63 +++++++-------- xcode-server/software/abr_hls_dash.py | 8 +- xcode-server/software/main.py | 63 +++++++-------- 24 files changed, 377 insertions(+), 88 deletions(-) create mode 100644 batch/.dockerignore create mode 100644 batch/CMakeLists.txt create mode 100644 batch/Dockerfile create mode 100755 batch/build.sh create mode 100755 batch/main.py create mode 100755 batch/messaging.py create mode 100644 batch/nginx.conf create mode 100755 batch/shell.sh create mode 100644 batch/transcoding.json diff --git a/CMakeLists.txt b/CMakeLists.txt index b632e78..8f240fe 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -14,6 +14,14 @@ if (NOT DEFINED NLIVES) set(NLIVES "1") endif() +if (NOT DEFINED SCENARIO) + set(SCENARIO "cdn") +endif() + +if (SCENARIO STREQUAL "batch") + set(NLIVES "0") +endif() + file(GLOB dirs "deployment" "*") list(REMOVE_DUPLICATES dirs) foreach(dir ${dirs}) @@ -24,5 +32,6 @@ endforeach() # legal message execute_process(COMMAND printf "\nThis script will build third party components licensed under various open source licenses into your container images. The terms under which those components may be used and distributed can be found with the license document that is provided with those components. 
Please familiarize yourself with those terms to ensure your distribution of those components complies with the terms of those licenses.\n\n") -execute_process(COMMAND printf "\n-- Setting: NVODS=${NVODS}, NLIVES=${NLIVES}\n") +execute_process(COMMAND printf "\n-- Setting: SCENARIO=${SCENARIO}\n") +execute_process(COMMAND printf "-- Setting: NVODS=${NVODS}, NLIVES=${NLIVES}\n") execute_process(COMMAND printf "-- Setting: REGISTRY=${REGISTRY}\n") diff --git a/batch/.dockerignore b/batch/.dockerignore new file mode 100644 index 0000000..596bd5f --- /dev/null +++ b/batch/.dockerignore @@ -0,0 +1,3 @@ +CMakeLists.txt +*.m4 +test/* diff --git a/batch/CMakeLists.txt b/batch/CMakeLists.txt new file mode 100644 index 0000000..7dc4dc4 --- /dev/null +++ b/batch/CMakeLists.txt @@ -0,0 +1,2 @@ +set(service "ovc_batch_service") +include("${CMAKE_SOURCE_DIR}/script/service.cmake") diff --git a/batch/Dockerfile b/batch/Dockerfile new file mode 100644 index 0000000..4698bc2 --- /dev/null +++ b/batch/Dockerfile @@ -0,0 +1,22 @@ + +FROM openvisualcloud/xeon-ubuntu1804-media-nginx:20.7 + +Run DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends python3-setuptools python3-redis python-celery-common python3-tornado python3-kafka python3-kazoo openssh-server && rm -rf /var/lib/apt/lists/* + +COPY *.py /home/ +COPY *.json /home/ +COPY *.conf /etc/nginx/ +CMD ["/bin/bash","-c","/home/main.py&/usr/local/sbin/nginx"] +WORKDIR /home + +#### +ARG UID +ARG GID +## must use ; here to ignore user exist status code +RUN [ ${GID} -gt 0 ] && groupadd -f -g ${GID} docker; \ + [ ${UID} -gt 0 ] && useradd -d /home/docker -g ${GID} -K UID_MAX=${UID} -K UID_MIN=${UID} docker; \ + touch /var/run/nginx.pid && \ + mkdir -p /var/log/nginx /var/lib/nginx /var/www/video /var/www/archive && \ + chown -R ${UID}:${GID} /var/run/nginx.pid /var/www /var/log/nginx /var/lib/nginx +USER ${UID} +#### diff --git a/batch/build.sh b/batch/build.sh new file mode 100755 index 
0000000..f2c3532 --- /dev/null +++ b/batch/build.sh @@ -0,0 +1,6 @@ +#!/bin/bash -e + +IMAGE="ovc_batch_service" +DIR=$(dirname $(readlink -f "$0")) + +. "${DIR}/../script/build.sh" diff --git a/batch/main.py b/batch/main.py new file mode 100755 index 0000000..e05d85e --- /dev/null +++ b/batch/main.py @@ -0,0 +1,43 @@ +#!/usr/bin/python3 + +from os.path import isfile +from messaging import Producer +import time +from os import listdir +import json + +KAFKA_TOPIC_VODS = "content_provider_sched_vods" +KAFKA_TOPIC_LIVES = "content_provider_sched_lives" +DASHLS_ROOT = "/var/www" +ARCHIVE_ROOT = "/var/www/archive" + +config_file="/home/transcoding.json" + +streams = [s for s in listdir(ARCHIVE_ROOT) if s.endswith((".mp4", ".avi"))] + +info={} +with open(config_file,"rt") as fd: + info=json.load(fd) + +print(info,flush=True) + +producer = Producer() +for stream in info[0]["vods"]: + # schedule producing the stream + if stream["name"] in streams: + msg=stream + print("start VOD transccoding on {} with {}: ".format(stream["name"],stream["type"]), flush=True) + print(msg,flush=True) + producer.send(KAFKA_TOPIC_VODS, json.dumps(msg)) + # wait until file is available, return it + start_time = time.time() + while time.time() - start_time < 60: + if isfile(DASHLS_ROOT+"/"+stream["name"]): break + time.sleep(1) + +producer.close() + +while True: + print("Running...",flush=True) + time.sleep(30) + diff --git a/batch/messaging.py b/batch/messaging.py new file mode 100755 index 0000000..f11184a --- /dev/null +++ b/batch/messaging.py @@ -0,0 +1,77 @@ +#!/usr/bin/python3 + +import socket +from kafka import KafkaProducer, KafkaConsumer, TopicPartition +import traceback +import socket +import time + +KAFKA_HOSTS = ["kafka-service:9092"] + +class Producer(object): + def __init__(self): + super(Producer, self).__init__() + self._client_id = socket.gethostname() + self._producer = None + + def send(self, topic, message): + if not self._producer: + try: + self._producer = 
KafkaProducer(bootstrap_servers=KAFKA_HOSTS, + client_id=self._client_id, + api_version=(0, 10), acks=0) + except: + print(traceback.format_exc(), flush=True) + self._producer = None + + try: + self._producer.send(topic, message.encode('utf-8')) + except: + print(traceback.format_exc(), flush=True) + + def flush(self): + if self._producer: + self._producer.flush() + + def close(self): + if self._producer: + self._producer.close() + self._producer=None + +class Consumer(object): + def __init__(self, group=None): + super(Consumer, self).__init__() + self._client_id = socket.gethostname() + self._group = group + + def messages(self, topic, timeout=None): + c = KafkaConsumer(topic, bootstrap_servers=KAFKA_HOSTS, client_id=self._client_id, + group_id=self._group, auto_offset_reset="earliest", api_version=(0, 10)) + + for msg in c: + yield msg.value.decode('utf-8') + c.close() + + def debug(self, topic): + c = KafkaConsumer(bootstrap_servers=KAFKA_HOSTS, client_id=self._client_id, + group_id=None, api_version=(0, 10)) + + # assign/subscribe topic + partitions = c.partitions_for_topic(topic) + if not partitions: + raise Exception("Topic "+topic+" not exist") + c.assign([TopicPartition(topic, p) for p in partitions]) + + # seek to beginning if needed + c.seek_to_beginning() + + # fetch messages + while True: + partitions = c.poll(100) + if partitions: + for p in partitions: + for msg in partitions[p]: + yield msg.value.decode('utf-8') + yield "" + + c.close() diff --git a/batch/nginx.conf b/batch/nginx.conf new file mode 100644 index 0000000..2549836 --- /dev/null +++ b/batch/nginx.conf @@ -0,0 +1,46 @@ + +worker_processes auto; +worker_rlimit_nofile 8192; +daemon off; +error_log /var/www/log/error.log warn; + +events { + worker_connections 4096; +} + +rtmp { + server { + listen 1935; + chunk_size 4000; + + application stream { + live on; + } + + application hls { + live on; + hls on; + hls_path /var/www/video/hls; + hls_nested on; + hls_fragment 3; + hls_playlist_length 
60; + hls_variant _low BANDWIDTH=2048000 RESOLUTION=854x480; + hls_variant _mid BANDWIDTH=4096000 RESOLUTION=1280x720; + hls_variant _hi BANDWIDTH=8192000 RESOLUTION=1920x1080; + } + + application dash { + live on; + dash on; + dash_path /var/www/video/dash; + dash_fragment 4; + dash_playlist_length 120; + dash_nested on; + dash_repetition on; + dash_cleanup on; + dash_variant _low bandwidth="2048000" width="854" height="480"; + dash_variant _med bandwidth="4096000" width="1280" height="720"; + dash_variant _hi bandwidth="8192000" width="1920" height="1080" max; + } + } +} diff --git a/batch/shell.sh b/batch/shell.sh new file mode 100755 index 0000000..fcf6b39 --- /dev/null +++ b/batch/shell.sh @@ -0,0 +1,7 @@ +#!/bin/bash -e + +IMAGE="ovc_batch_service" +DIR=$(dirname $(readlink -f "$0")) +OPTIONS=("--volume=${DIR}/../../volume/video/archive:/var/www/archive:ro" "--volume=${DIR}/../../volume/video/dash:/var/www/dash:ro" "--volume=${DIR}/../../volume/video/hls:/var/www/hls:ro") + +. "${DIR}/../script/shell.sh" diff --git a/batch/transcoding.json b/batch/transcoding.json new file mode 100644 index 0000000..7759134 --- /dev/null +++ b/batch/transcoding.json @@ -0,0 +1,54 @@ +[{ + "vods": [{ + "name": "bbb_sunflower_1080p_30fps_normal.mp4", + "type": "hls", + "parameters": [ + [3840, 2160, 14000000, 192000], + [2560, 1440, 10000000, 192000], + [1920, 1080, 5000000, 192000], + [1280, 720, 2800000, 192000], + [842, 480, 1400000, 128000], + [640, 360, 800000, 128000] + ], + "codec": "AVC", + "loop": 0, + "target": "file", + "platform": "software" + },{ + "name": "bbb_sunflower_1080p_30fps_normal.mp4", + "type": "dash", + "parameters": [ + [3840, 2160, 14000000, 192000], + [2560, 1440, 10000000, 192000], + [1920, 1080, 5000000, 192000], + [1280, 720, 2800000, 192000], + [842, 480, 1400000, 128000], + [640, 360, 800000, 128000] + ], + "codec": "AVC", + "loop": 0, + "target": "file", + "platform": "software" + }], + "lives": [{ + "name": 
"bbb_sunflower_1080p_30fps_normal.mp4", + "type": "hls", + "parameters": [ + [1920, 1080, 5000000, 192000] + ], + "codec": "AVC", + "loop": 1, + "target": "rtmp", + "platform": "software" + },{ + "name": "bbb_sunflower_1080p_30fps_normal.mp4", + "type": "dash", + "parameters": [ + [1920, 1080, 5000000, 192000] + ], + "codec": "AVC", + "loop": 1, + "target": "rtmp", + "platform": "software" + }] +}] diff --git a/cdn-server/schedule.py b/cdn-server/schedule.py index 04a9935..1450d93 100755 --- a/cdn-server/schedule.py +++ b/cdn-server/schedule.py @@ -4,19 +4,29 @@ from tornado import web, gen from messaging import Producer import time +import json -KAFKA_TOPIC = "content_provider_sched" +KAFKA_TOPIC_VODS = "content_provider_sched_vods" DASHLS_ROOT = "/var/www" class ScheduleHandler(web.RequestHandler): @gen.coroutine def get(self): - stream = self.request.uri.replace("/schedule/", "") + stream = self.requeist.uri.replace("/schedule/", "") # schedule producing the stream print("request received to process stream: "+stream, flush=True) producer = Producer() - producer.send(KAFKA_TOPIC, stream) + msg.update({ + "name":stream.split("/")[1], + "type":stream.split("/")[0], + "parameters": [ ], + "codec": "AVC", + "loop": 0, + "target": "file", + "platform": "software" + }) + producer.send(KAFKA_TOPIC_VODS, msg) producer.close() # wait until file is available, return it diff --git a/deployment/docker-swarm/build.sh b/deployment/docker-swarm/build.sh index 402be67..eb9447d 100755 --- a/deployment/docker-swarm/build.sh +++ b/deployment/docker-swarm/build.sh @@ -2,9 +2,10 @@ DIR=$(dirname $(readlink -f "$0")) NVODS="${1:-1}" -REGISTRY="$3" +SCENARIO="${3:-cdn}" +REGISTRY="$4" rm -rf "$DIR/../../volume/video/cache" mkdir -p "$DIR/../../volume/video/cache/hls" "$DIR/../../volume/video/cache/dash" -m4 -DNVODS=${NVODS} -DREGISTRY_PREFIX=${REGISTRY} -I "${DIR}" "${DIR}/docker-compose.yml.m4" > "${DIR}/docker-compose.yml" +m4 -DNVODS=${NVODS} -DSCENARIO=${SCENARIO} 
-DREGISTRY_PREFIX=${REGISTRY} -I "${DIR}" "${DIR}/docker-compose.yml.m4" > "${DIR}/docker-compose.yml" diff --git a/deployment/kubernetes/helm/build.sh b/deployment/kubernetes/helm/build.sh index 0bd8545..94b296e 100755 --- a/deployment/kubernetes/helm/build.sh +++ b/deployment/kubernetes/helm/build.sh @@ -3,7 +3,8 @@ DIR=$(dirname $(readlink -f "$0")) NVODS="${1:-1}" NLIVES="${2:-1}" -REGISTRY="$3" +SCENARIO="${3:-cdn}" +REGISTRY="$4" HOSTIP=$(ip route get 8.8.8.8 | awk '/ src /{split(substr($0,index($0," src ")),f);print f[2];exit}') # make sure helm is functional @@ -11,5 +12,5 @@ helm version >/dev/null 2>/dev/null || exit 0 echo "Generating helm chart" . "${DIR}/../volume-info.sh" -m4 -DREGISTRY_PREFIX=${REGISTRY} -DNVODS=${NVODS} -DNLIVES=${NLIVES} -DUSERID=$(id -u) -DGROUPID=$(id -g) -DHOSTIP=${HOSTIP} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}/cdn-transcode" "$DIR/cdn-transcode/values.yaml.m4" > "$DIR/cdn-transcode/values.yaml" +m4 -DREGISTRY_PREFIX=${REGISTRY} -DNVODS=${NVODS} -DNLIVES=${NLIVES} -DSCENARIO=${SCENARIO} -DUSERID=$(id -u) -DGROUPID=$(id -g) -DHOSTIP=${HOSTIP} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}/cdn-transcode" "$DIR/cdn-transcode/values.yaml.m4" > "$DIR/cdn-transcode/values.yaml" diff --git a/deployment/kubernetes/yaml/build.sh b/deployment/kubernetes/yaml/build.sh index 4763117..9228039 100755 --- a/deployment/kubernetes/yaml/build.sh +++ b/deployment/kubernetes/yaml/build.sh @@ -3,10 +3,12 @@ DIR=$(dirname $(readlink -f "$0")) NVODS="${1:-1}" NLIVES="${2:-1}" -REGISTRY="$3" +SCENARIO="${3:-cdn}" +REGISTRY="$4" HOSTIP=$(ip route get 8.8.8.8 | awk '/ src /{split(substr($0,index($0," src ")),f);print f[2];exit}') . 
"${DIR}/../volume-info.sh" +echo "NVODS=${NVODS} NLIVES=${NLIVES} SCENARIO=${SCENARIO}" for template in $(find "${DIR}" -maxdepth 1 -name "*.yaml.m4" -print); do - m4 -DNVODS=${NVODS} -DNLIVES=${NLIVES} -DHOSTIP=${HOSTIP} -DREGISTRY_PREFIX=${REGISTRY} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}" "${template}" > "${template/.m4/}" + m4 -DNVODS=${NVODS} -DNLIVES=${NLIVES} -DSCENARIO=${SCENARIO} -DHOSTIP=${HOSTIP} -DREGISTRY_PREFIX=${REGISTRY} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}" "${template}" > "${template/.m4/}" done diff --git a/deployment/kubernetes/yaml/cdn-deploy.yaml.m4 b/deployment/kubernetes/yaml/cdn-deploy.yaml.m4 index 88c03fa..a316b4f 100644 --- a/deployment/kubernetes/yaml/cdn-deploy.yaml.m4 +++ b/deployment/kubernetes/yaml/cdn-deploy.yaml.m4 @@ -20,10 +20,12 @@ spec: enableServiceLinks: false containers: - name: cdn - image: defn(`REGISTRY_PREFIX')ovc_cdn_service:latest + image: defn(`REGISTRY_PREFIX')`ovc_'defn(`SCENARIO')_service:latest imagePullPolicy: IfNotPresent ports: +ifelse(defn(`SCENARIO'),`cdn',`dnl - containerPort: 8443 +')dnl - containerPort: 1935 resources: limits: @@ -51,3 +53,4 @@ spec: secret: secretName: self-signed-certificate PLATFORM_NODE_SELECTOR(`Xeon')dnl + diff --git a/deployment/kubernetes/yaml/cdn-svc.yaml.m4 b/deployment/kubernetes/yaml/cdn-svc.yaml.m4 index c285836..0cbe771 100644 --- a/deployment/kubernetes/yaml/cdn-svc.yaml.m4 +++ b/deployment/kubernetes/yaml/cdn-svc.yaml.m4 @@ -1,3 +1,4 @@ + apiVersion: v1 kind: Service metadata: @@ -6,9 +7,11 @@ metadata: app: cdn spec: ports: +ifelse(defn(`SCENARIO'),`cdn',`dnl - port: 443 targetPort: 8443 name: https +')dnl - port: 1935 targetPort: 1935 name: rtmp @@ -16,3 +19,4 @@ spec: - defn(`HOSTIP') selector: app: cdn + diff --git a/deployment/kubernetes/yaml/start.sh b/deployment/kubernetes/yaml/start.sh index cccd4ae..56bb9e3 100755 --- a/deployment/kubernetes/yaml/start.sh +++ b/deployment/kubernetes/yaml/start.sh @@ -1,8 +1,9 @@ #!/bin/bash -e DIR=$(dirname 
$(readlink -f "$0")) -NVODS="${2:-1}" -NLIVES="${3:-1}" +NVODS="${1:-1}" +NLIVES="${2:-1}" +SCENARIO="${3:-cdn}" REGISTRY="$4" # Set Bash color diff --git a/script/build.sh b/script/build.sh index c27f2a7..7a573ff 100644 --- a/script/build.sh +++ b/script/build.sh @@ -5,7 +5,7 @@ if test -z "${DIR}"; then exit -1 fi -REGISTRY="$3" +REGISTRY="$4" # build image(s) in order (to satisfy dependencies) for dep in .8 .7 .6 .5 .4 .3 .2 .1 ''; do diff --git a/script/deployment.cmake b/script/deployment.cmake index 38e4234..fb686fe 100644 --- a/script/deployment.cmake +++ b/script/deployment.cmake @@ -1,2 +1,2 @@ -add_custom_target(start_${service} "${CMAKE_CURRENT_SOURCE_DIR}/start.sh" "${service}" "${NVODS}" "${NLIVES}" "${REGISTRY}") +add_custom_target(start_${service} "${CMAKE_CURRENT_SOURCE_DIR}/start.sh" "${service}" "${NVODS}" "${NLIVES}" "${SCENARIO}" "${REGISTRY}") add_custom_target(stop_${service} "${CMAKE_CURRENT_SOURCE_DIR}/stop.sh" "${service}") diff --git a/script/service.cmake b/script/service.cmake index 4f85122..49973b6 100644 --- a/script/service.cmake +++ b/script/service.cmake @@ -1,3 +1,3 @@ if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/build.sh") - add_custom_target(build_${service} ALL "${CMAKE_CURRENT_SOURCE_DIR}/build.sh" "${NVODS}" "${NLIVES}" "${REGISTRY}") + add_custom_target(build_${service} ALL "${CMAKE_CURRENT_SOURCE_DIR}/build.sh" "${NVODS}" "${NLIVES}" "${SCENARIO}" "${REGISTRY}") endif() diff --git a/xcode-server/hardware/abr_hls_dash.py b/xcode-server/hardware/abr_hls_dash.py index 38f56da..0e7a1b1 100755 --- a/xcode-server/hardware/abr_hls_dash.py +++ b/xcode-server/hardware/abr_hls_dash.py @@ -17,7 +17,7 @@ def to_kps(bitrate): return str(int(bitrate/1000))+"k" def GetABRCommand(in_file, target, streaming_type, renditions=RENDITIONS_SAMPLE, duration=2, - segment_num=0): + segment_num=0,loop=0): ffprobe_cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", "-show_streams", in_file] @@ -51,7 +51,11 @@ def GetABRCommand(in_file, target, 
streaming_type, renditions=RENDITIONS_SAMPLE, cmd = [] cmd_abr = [] - cmd_base = ["ffmpeg", "-hide_banner", "-y", "-i", in_file] + if loop: + cmd_base = ["ffmpeg", "-hide_banner", "-y", "-stream_loop", "0", "-i", in_file] + else: + cmd_base = ["ffmpeg", "-hide_banner", "-y", "-i", in_file] + cmd_static = ["-c:v", "libx264", "-profile:v", "main", "-sc_threshold", "0", "-strict", "-2"] cmd_static += ["-g", str(keyframe_interval), "-keyint_min", str(keyframe_interval)] cmd_dash = ["-use_timeline", "1", "-use_template", "1", "-seg_duration", diff --git a/xcode-server/hardware/main.py b/xcode-server/hardware/main.py index 30fa898..d002116 100755 --- a/xcode-server/hardware/main.py +++ b/xcode-server/hardware/main.py @@ -8,16 +8,21 @@ from abr_hls_dash import GetABRCommand import traceback import time +import json -KAFKA_TOPIC = "content_provider_sched" +KAFKA_TOPIC_VODS = "content_provider_sched_vods" KAFKA_GROUP = "content_provider_dash_hls_creator" ARCHIVE_ROOT = "/var/www/archive" DASH_ROOT = "/var/www/video/dash" HLS_ROOT = "/var/www/video/hls" -def process_stream(stream): - stream_name = stream.split("/")[1] +def process_stream(msg): + stream_name=msg["name"] + stream_type=msg["type"] + stream_parameters=msg["parameters"] + loop= msg["loop"] + stream=stream_type+"/"+stream_name if not isfile(ARCHIVE_ROOT+"/"+stream_name): return @@ -27,39 +32,29 @@ def process_stream(stream): zk.close() return - if stream.endswith(".mpd"): - try: - mkdir(DASH_ROOT+"/"+stream_name) - except: - pass + target_root=HLS_ROOT + if stream_type=="DASH": + target_root=DASH_ROOT - if zk.process_start(): - try: - cmd = GetABRCommand(ARCHIVE_ROOT+"/"+stream_name, DASH_ROOT+"/"+stream_name, "dash") - r = call(cmd) - if r: - raise Exception("status code: "+str(r)) - zk.process_end() - except: - print(traceback.format_exc(), flush=True) - zk.process_abort() + try: + mkdir(target_root+"/"+stream_name) + except: + pass - if stream.endswith(".m3u8"): + if zk.process_start(): try: - 
mkdir(HLS_ROOT+"/"+stream_name) + if stream_parameters: + cmd = GetABRCommand(ARCHIVE_ROOT+"/"+stream_name, target_root+"/"+stream_name, stream_type,renditions=stream_parameters,loop=loop) + else: + cmd = GetABRCommand(ARCHIVE_ROOT+"/"+stream_name, target_root+"/"+stream_name, stream_type,loop=loop) + print(cmd, flush=True) + r = call(cmd) + if r: + raise Exception("status code: "+str(r)) + zk.process_end() except: - pass - - if zk.process_start(): - try: - cmd = GetABRCommand(ARCHIVE_ROOT+"/"+stream_name, HLS_ROOT+"/"+stream_name, "hls") - r = call(cmd) - if r: - raise Exception("status code: "+str(r)) - zk.process_end() - except: - print(traceback.format_exc(), flush=True) - zk.process_abort() + print(traceback.format_exc(), flush=True) + zk.process_abort() zk.close() @@ -67,8 +62,8 @@ def process_stream(stream): c = Consumer(KAFKA_GROUP) while True: try: - for message in c.messages(KAFKA_TOPIC): - process_stream(message) + for message in c.messages(KAFKA_TOPIC_VODS): + process_stream(json.loads(message)) except: print(traceback.format_exc(), flush=True) time.sleep(2) diff --git a/xcode-server/software/abr_hls_dash.py b/xcode-server/software/abr_hls_dash.py index 38f56da..0e7a1b1 100755 --- a/xcode-server/software/abr_hls_dash.py +++ b/xcode-server/software/abr_hls_dash.py @@ -17,7 +17,7 @@ def to_kps(bitrate): return str(int(bitrate/1000))+"k" def GetABRCommand(in_file, target, streaming_type, renditions=RENDITIONS_SAMPLE, duration=2, - segment_num=0): + segment_num=0,loop=0): ffprobe_cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", "-show_streams", in_file] @@ -51,7 +51,11 @@ def GetABRCommand(in_file, target, streaming_type, renditions=RENDITIONS_SAMPLE, cmd = [] cmd_abr = [] - cmd_base = ["ffmpeg", "-hide_banner", "-y", "-i", in_file] + if loop: + cmd_base = ["ffmpeg", "-hide_banner", "-y", "-stream_loop", "0", "-i", in_file] + else: + cmd_base = ["ffmpeg", "-hide_banner", "-y", "-i", in_file] + cmd_static = ["-c:v", "libx264", "-profile:v", "main", 
"-sc_threshold", "0", "-strict", "-2"] cmd_static += ["-g", str(keyframe_interval), "-keyint_min", str(keyframe_interval)] cmd_dash = ["-use_timeline", "1", "-use_template", "1", "-seg_duration", diff --git a/xcode-server/software/main.py b/xcode-server/software/main.py index 30fa898..d002116 100755 --- a/xcode-server/software/main.py +++ b/xcode-server/software/main.py @@ -8,16 +8,21 @@ from abr_hls_dash import GetABRCommand import traceback import time +import json -KAFKA_TOPIC = "content_provider_sched" +KAFKA_TOPIC_VODS = "content_provider_sched_vods" KAFKA_GROUP = "content_provider_dash_hls_creator" ARCHIVE_ROOT = "/var/www/archive" DASH_ROOT = "/var/www/video/dash" HLS_ROOT = "/var/www/video/hls" -def process_stream(stream): - stream_name = stream.split("/")[1] +def process_stream(msg): + stream_name=msg["name"] + stream_type=msg["type"] + stream_parameters=msg["parameters"] + loop= msg["loop"] + stream=stream_type+"/"+stream_name if not isfile(ARCHIVE_ROOT+"/"+stream_name): return @@ -27,39 +32,29 @@ def process_stream(stream): zk.close() return - if stream.endswith(".mpd"): - try: - mkdir(DASH_ROOT+"/"+stream_name) - except: - pass + target_root=HLS_ROOT + if stream_type=="DASH": + target_root=DASH_ROOT - if zk.process_start(): - try: - cmd = GetABRCommand(ARCHIVE_ROOT+"/"+stream_name, DASH_ROOT+"/"+stream_name, "dash") - r = call(cmd) - if r: - raise Exception("status code: "+str(r)) - zk.process_end() - except: - print(traceback.format_exc(), flush=True) - zk.process_abort() + try: + mkdir(target_root+"/"+stream_name) + except: + pass - if stream.endswith(".m3u8"): + if zk.process_start(): try: - mkdir(HLS_ROOT+"/"+stream_name) + if stream_parameters: + cmd = GetABRCommand(ARCHIVE_ROOT+"/"+stream_name, target_root+"/"+stream_name, stream_type,renditions=stream_parameters,loop=loop) + else: + cmd = GetABRCommand(ARCHIVE_ROOT+"/"+stream_name, target_root+"/"+stream_name, stream_type,loop=loop) + print(cmd, flush=True) + r = call(cmd) + if r: + raise 
Exception("status code: "+str(r)) + zk.process_end() except: - pass - - if zk.process_start(): - try: - cmd = GetABRCommand(ARCHIVE_ROOT+"/"+stream_name, HLS_ROOT+"/"+stream_name, "hls") - r = call(cmd) - if r: - raise Exception("status code: "+str(r)) - zk.process_end() - except: - print(traceback.format_exc(), flush=True) - zk.process_abort() + print(traceback.format_exc(), flush=True) + zk.process_abort() zk.close() @@ -67,8 +62,8 @@ def process_stream(stream): c = Consumer(KAFKA_GROUP) while True: try: - for message in c.messages(KAFKA_TOPIC): - process_stream(message) + for message in c.messages(KAFKA_TOPIC_VODS): + process_stream(json.loads(message)) except: print(traceback.format_exc(), flush=True) time.sleep(2) From fe4832222a70f5730ac71387c4baaf29e7b3f2ee Mon Sep 17 00:00:00 2001 From: Pengfei Qu Date: Fri, 18 Dec 2020 12:54:04 -0500 Subject: [PATCH 43/91] add common image --- common/CMakeLists.txt | 2 + common/Dockerfile | 19 +++++++ common/abr_hls_dash.py | 124 +++++++++++++++++++++++++++++++++++++++++ common/build.sh | 5 ++ common/messaging.py | 77 +++++++++++++++++++++++++ common/shell.sh | 6 ++ common/zkstate.py | 48 ++++++++++++++++ 7 files changed, 281 insertions(+) create mode 100644 common/CMakeLists.txt create mode 100644 common/Dockerfile create mode 100755 common/abr_hls_dash.py create mode 100755 common/build.sh create mode 100755 common/messaging.py create mode 100755 common/shell.sh create mode 100755 common/zkstate.py diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt new file mode 100644 index 0000000..4e41dcc --- /dev/null +++ b/common/CMakeLists.txt @@ -0,0 +1,2 @@ +set(service "ovc_cdn_common") +include("${CMAKE_SOURCE_DIR}/script/service.cmake") diff --git a/common/Dockerfile b/common/Dockerfile new file mode 100644 index 0000000..5fb8a46 --- /dev/null +++ b/common/Dockerfile @@ -0,0 +1,19 @@ + +FROM centos:7.6.1810 + +RUN yum install -y -q epel-release && yum install -y -q python36-requests python36-ply python36-psutil && rm 
-rf /var/cache/yum/* + +COPY *.py /home/ +ENV PYTHONIOENCODING=UTF-8 + +#### +ARG USER=docker +ARG GROUP=docker +ARG UID +ARG GID +## must use ; here to ignore user exist status code +RUN [ ${GID} -gt 0 ] && groupadd -f -g ${GID} ${GROUP}; \ + [ ${UID} -gt 0 ] && useradd -d /home -M -g ${GID} -K UID_MAX=${UID} -K UID_MIN=${UID} ${USER}; \ + chown -R ${UID}:${GID} /home +#### + diff --git a/common/abr_hls_dash.py b/common/abr_hls_dash.py new file mode 100755 index 0000000..0e7a1b1 --- /dev/null +++ b/common/abr_hls_dash.py @@ -0,0 +1,124 @@ +#!/usr/bin/python3 + +import subprocess +import json + +RENDITIONS_SAMPLE = ( + # resolution bitrate(kbps) audio-rate(kbps) + [3840, 2160, 14000000, 192000], + [2560, 1440, 10000000, 192000], + [1920, 1080, 5000000, 192000], + [1280, 720, 2800000, 192000], + [842, 480, 1400000, 128000], + [640, 360, 800000, 128000] +) + +def to_kps(bitrate): + return str(int(bitrate/1000))+"k" + +def GetABRCommand(in_file, target, streaming_type, renditions=RENDITIONS_SAMPLE, duration=2, + segment_num=0,loop=0): + ffprobe_cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", + "-show_streams", in_file] + + process_id = subprocess.Popen(ffprobe_cmd, stdout=subprocess.PIPE) + # the `multiprocessing.Process` process will wait until + # the call to the `subprocess.Popen` object is completed + process_id.wait() + clip_info = json.loads(process_id.stdout.read().decode("utf-8")) + + keyframe_interval = 0 + frame_height = 0 + clip_v_duration = 0 + clip_a_duration = 0 + + segment_target_duration = duration # try to create a new segment every X seconds + max_bitrate_ratio = 1.07 # maximum accepted bitrate fluctuations + rate_monitor_buffer_ratio = 1.5 # maximum buffer size between bitrate conformance checks + + for item in clip_info["streams"]: + if item["codec_type"] == "video": + keyframe_interval = int(eval(item["avg_frame_rate"])+0.5) + frame_height = item["height"] + clip_v_duration = eval(item["duration"]) + if item["codec_type"] == "audio": + 
clip_a_duration = eval(item["duration"]) + + if segment_num != 0: + segment_duration = (int)((clip_v_duration+2.0)/segment_num) + if segment_duration < segment_target_duration: + segment_target_duration = segment_duration + + cmd = [] + cmd_abr = [] + if loop: + cmd_base = ["ffmpeg", "-hide_banner", "-y", "-stream_loop", "0", "-i", in_file] + else: + cmd_base = ["ffmpeg", "-hide_banner", "-y", "-i", in_file] + + cmd_static = ["-c:v", "libx264", "-profile:v", "main", "-sc_threshold", "0", "-strict", "-2"] + cmd_static += ["-g", str(keyframe_interval), "-keyint_min", str(keyframe_interval)] + cmd_dash = ["-use_timeline", "1", "-use_template", "1", "-seg_duration", + str(segment_target_duration), "-adaptation_sets", "id=0,streams=v"] + cmd_hls = ["-hls_time", str(segment_target_duration), "-hls_list_size", "0"] + cmd_fade_in_out = ["-an"] + + master_playlist = "#EXTM3U" + "\n" + "#EXT-X-VERSION:3" +"\n" + "#" + "\n" + + count = 0 + default_threshold = 4 + + for item in renditions: + width = item[0] + height = item[1] + v_bitrate = to_kps(item[2]) + a_bitrate = to_kps(item[3]) + maxrate = to_kps(item[2] * max_bitrate_ratio) + bufsize = to_kps(item[2] * rate_monitor_buffer_ratio) + name = str(height) + "p" + + if frame_height < height: + continue + + cmd_1 = [] + cmd_2 = [] + cmd_3 = [] + cmd_4 = [] + + if streaming_type == "hls": + cmd_1 = ["-vf", "scale=w="+str(width)+":"+"h="+str(height)+":"+"force_original_aspect_ratio=decrease" + +","+ "pad=w="+str(width)+":"+"h="+str(height)+":"+"x=(ow-iw)/2"+":"+"y=(oh-ih)/2"] + cmd_2 = ["-b:v", v_bitrate, "-maxrate", maxrate, "-bufsize", bufsize] + cmd_3 = ["-f", streaming_type] + cmd_4 = ["-hls_segment_filename", target+"/"+name+"_"+"%03d.ts", target+"/"+name+".m3u8"] + master_playlist += "#EXT-X-STREAM-INF:BANDWIDTH="+str(item[2])+","+"RESOLUTION="+str(width)+"x"+str(height)+"\n"+name+".m3u8"+"\n" + cmd_abr += cmd_static + cmd_1 + cmd_2 + cmd_fade_in_out + cmd_3 + cmd_hls + cmd_4 + + if streaming_type == "dash": + cmd_1 = 
["-map", "0:v", "-b:v"+":"+str(count), v_bitrate, "-s:v"+":"+str(count), str(width)+"x"+str(height), + "-maxrate"+":"+str(count), maxrate, "-bufsize"+":"+str(count), bufsize] + cmd_2 = ["-an"] + cmd_3 = ["-f", streaming_type] + cmd_4 = ["-init_seg_name", name+"-init-stream$RepresentationID$.m4s", "-media_seg_name", + name+"-chunk-stream$RepresentationID$-$Number%05d$.m4s", "-y", target+"/"+name+".mpd"] + if clip_a_duration == 0: + cmd_1 = ["-map", "0:v", "-b:v"+":"+str(count), v_bitrate, "-s:v"+":"+str(count), str(width)+"x"+str(height), + "-maxrate"+":"+str(count), maxrate, "-bufsize"+":"+str(count), bufsize] + cmd_2 = [] + cmd_abr += cmd_1 + cmd_2 + + count += 1 + if default_threshold < count: + break + + if streaming_type == "hls": + cmd = cmd_base + cmd_abr + elif streaming_type == "dash": + cmd = cmd_base + cmd_static + cmd_abr +["-f", "dash"] + cmd_dash + ["-y", target+"/"+"index.mpd"] + + #generate master m3u8 file + if streaming_type == "hls": + with open(target+"/"+"index.m3u8", "w", encoding='utf-8') as f: + f.write(master_playlist) + + return cmd diff --git a/common/build.sh b/common/build.sh new file mode 100755 index 0000000..d04672c --- /dev/null +++ b/common/build.sh @@ -0,0 +1,5 @@ +#!/bin/bash -e + +IMAGE="ovc_cdn_common" +DIR=$(dirname $(readlink -f "$0")) +. 
"$DIR/../script/build.sh" diff --git a/common/messaging.py b/common/messaging.py new file mode 100755 index 0000000..f11184a --- /dev/null +++ b/common/messaging.py @@ -0,0 +1,77 @@ +#!/usr/bin/python3 + +import socket +from kafka import KafkaProducer, KafkaConsumer, TopicPartition +import traceback +import socket +import time + +KAFKA_HOSTS = ["kafka-service:9092"] + +class Producer(object): + def __init__(self): + super(Producer, self).__init__() + self._client_id = socket.gethostname() + self._producer = None + + def send(self, topic, message): + if not self._producer: + try: + self._producer = KafkaProducer(bootstrap_servers=KAFKA_HOSTS, + client_id=self._client_id, + api_version=(0, 10), acks=0) + except: + print(traceback.format_exc(), flush=True) + self._producer = None + + try: + self._producer.send(topic, message.encode('utf-8')) + except: + print(traceback.format_exc(), flush=True) + + def flush(self): + if self._producer: + self._producer.flush() + + def close(self): + if self._producer: + self._producer.close() + self._producer=None + +class Consumer(object): + def __init__(self, group=None): + super(Consumer, self).__init__() + self._client_id = socket.gethostname() + self._group = group + + def messages(self, topic, timeout=None): + c = KafkaConsumer(topic, bootstrap_servers=KAFKA_HOSTS, client_id=self._client_id, + group_id=self._group, auto_offset_reset="earliest", api_version=(0, 10)) + + for msg in c: + yield msg.value.decode('utf-8') + c.close() + + def debug(self, topic): + c = KafkaConsumer(bootstrap_servers=KAFKA_HOSTS, client_id=self._client_id, + group_id=None, api_version=(0, 10)) + + # assign/subscribe topic + partitions = c.partitions_for_topic(topic) + if not partitions: + raise Exception("Topic "+topic+" not exist") + c.assign([TopicPartition(topic, p) for p in partitions]) + + # seek to beginning if needed + c.seek_to_beginning() + + # fetch messages + while True: + partitions = c.poll(100) + if partitions: + for p in partitions: + for 
msg in partitions[p]: + yield msg.value.decode('utf-8') + yield "" + + c.close() diff --git a/common/shell.sh b/common/shell.sh new file mode 100755 index 0000000..c8489b0 --- /dev/null +++ b/common/shell.sh @@ -0,0 +1,6 @@ +#!/bin/bash -e + +IMAGE="ovc_cdn_common" +DIR=$(dirname $(readlink -f "$0")) + +. "$DIR/../script/shell.sh" diff --git a/common/zkstate.py b/common/zkstate.py new file mode 100755 index 0000000..a8c5f43 --- /dev/null +++ b/common/zkstate.py @@ -0,0 +1,48 @@ +#!/usr/bin/python3 + +from kazoo.client import KazooClient +from kazoo.exceptions import NoNodeError, NodeExistsError +from kazoo.protocol.states import KazooState +import traceback +import time + +ZK_HOSTS = 'zookeeper-service:2181' + +class ZKState(object): + def __init__(self, path, name=None): + super(ZKState, self).__init__() + options={"max_tries":-1, "max_delay":5, "ignore_expire":True} + self._zk = KazooClient(hosts=ZK_HOSTS, connection_retry=options) + try: + self._zk.start(timeout=3600) + except: + print(traceback.format_exc(), flush=True) + self._path = path + self._name="" if name is None else name+"." 
+ self._zk.ensure_path(path) + + def processed(self): + return self._zk.exists(self._path+"/"+self._name+"complete") + + def process_start(self): + if self.processed(): + return False + try: + self._zk.create(self._path+"/"+self._name+"processing", ephemeral=True) + return True + except NodeExistsError: # another process wins + return False + + def process_end(self): + try: + self._zk.create(self._path+"/"+self._name+"complete") + except NodeExistsError: + pass + + def process_abort(self): + # the ephemeral node will be deleted upon close + pass + + def close(self): + self._zk.stop() + self._zk.close() From 8941ba0dd5d2a63836929cfaff322b3bc43615d8 Mon Sep 17 00:00:00 2001 From: Pengfei Qu Date: Fri, 18 Dec 2020 12:55:01 -0500 Subject: [PATCH 44/91] refine the transcode service --- CMakeLists.txt | 6 +- cdn-server/schedule.py | 5 +- deployment/kubernetes/yaml/build.sh | 7 +- .../kubernetes/yaml/live-deploy.yaml.m4 | 2 +- deployment/kubernetes/yaml/start.sh | 3 +- deployment/kubernetes/yaml/vod-deploy.yaml.m4 | 2 +- script/build.sh | 44 ++++--- script/deployment.cmake | 2 +- script/service.cmake | 2 +- xcode-server/CMakeLists.txt | 4 +- xcode-server/{software => Xeon}/Dockerfile | 3 + xcode-server/{hardware => Xeone3}/Dockerfile | 2 + xcode-server/build.sh | 5 + xcode-server/hardware/CMakeLists.txt | 2 - xcode-server/hardware/abr_hls_dash.py | 124 ------------------ xcode-server/hardware/build.sh | 6 - xcode-server/hardware/messaging.py | 77 ----------- xcode-server/hardware/zkstate.py | 48 ------- xcode-server/{hardware => }/main.py | 0 xcode-server/{hardware => }/shell.sh | 3 +- xcode-server/software/CMakeLists.txt | 2 - xcode-server/software/abr_hls_dash.py | 124 ------------------ xcode-server/software/build.sh | 6 - xcode-server/software/main.py | 70 ---------- xcode-server/software/messaging.py | 77 ----------- xcode-server/software/shell.sh | 7 - xcode-server/software/zkstate.py | 48 ------- 27 files changed, 59 insertions(+), 622 deletions(-) rename 
xcode-server/{software => Xeon}/Dockerfile (90%) rename xcode-server/{hardware => Xeone3}/Dockerfile (90%) create mode 100755 xcode-server/build.sh delete mode 100644 xcode-server/hardware/CMakeLists.txt delete mode 100755 xcode-server/hardware/abr_hls_dash.py delete mode 100755 xcode-server/hardware/build.sh delete mode 100755 xcode-server/hardware/messaging.py delete mode 100755 xcode-server/hardware/zkstate.py rename xcode-server/{hardware => }/main.py (100%) rename xcode-server/{hardware => }/shell.sh (76%) delete mode 100644 xcode-server/software/CMakeLists.txt delete mode 100755 xcode-server/software/abr_hls_dash.py delete mode 100755 xcode-server/software/build.sh delete mode 100755 xcode-server/software/main.py delete mode 100755 xcode-server/software/messaging.py delete mode 100755 xcode-server/software/shell.sh delete mode 100755 xcode-server/software/zkstate.py diff --git a/CMakeLists.txt b/CMakeLists.txt index 8f240fe..9d19bae 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -14,6 +14,10 @@ if (NOT DEFINED NLIVES) set(NLIVES "1") endif() +if (NOT DEFINED PLATFORM) + set(PLATFORM "Xeon") +endif() + if (NOT DEFINED SCENARIO) set(SCENARIO "cdn") endif() @@ -32,6 +36,6 @@ endforeach() # legal message execute_process(COMMAND printf "\nThis script will build third party components licensed under various open source licenses into your container images. The terms under which those components may be used and distributed can be found with the license document that is provided with those components. 
Please familiarize yourself with those terms to ensure your distribution of those components complies with the terms of those licenses.\n\n") -execute_process(COMMAND printf "\n-- Setting: SCENARIO=${SCENARIO}\n") +execute_process(COMMAND printf "\n-- Setting: PLATFORM=${PLATFORM} SCENARIO=${SCENARIO}\n") execute_process(COMMAND printf "-- Setting: NVODS=${NVODS}, NLIVES=${NLIVES}\n") execute_process(COMMAND printf "-- Setting: REGISTRY=${REGISTRY}\n") diff --git a/cdn-server/schedule.py b/cdn-server/schedule.py index 1450d93..2aa94e8 100755 --- a/cdn-server/schedule.py +++ b/cdn-server/schedule.py @@ -12,11 +12,12 @@ class ScheduleHandler(web.RequestHandler): @gen.coroutine def get(self): - stream = self.requeist.uri.replace("/schedule/", "") + stream = self.request.uri.replace("/schedule/", "") # schedule producing the stream print("request received to process stream: "+stream, flush=True) producer = Producer() + msg={} msg.update({ "name":stream.split("/")[1], "type":stream.split("/")[0], @@ -26,7 +27,7 @@ def get(self): "target": "file", "platform": "software" }) - producer.send(KAFKA_TOPIC_VODS, msg) + producer.send(KAFKA_TOPIC_VODS, json.dumps(msg)) producer.close() # wait until file is available, return it diff --git a/deployment/kubernetes/yaml/build.sh b/deployment/kubernetes/yaml/build.sh index 9228039..04d02ee 100755 --- a/deployment/kubernetes/yaml/build.sh +++ b/deployment/kubernetes/yaml/build.sh @@ -4,11 +4,12 @@ DIR=$(dirname $(readlink -f "$0")) NVODS="${1:-1}" NLIVES="${2:-1}" SCENARIO="${3:-cdn}" -REGISTRY="$4" +PLATFORM="${4:-Xeon}" +REGISTRY="$5" HOSTIP=$(ip route get 8.8.8.8 | awk '/ src /{split(substr($0,index($0," src ")),f);print f[2];exit}') . 
"${DIR}/../volume-info.sh" -echo "NVODS=${NVODS} NLIVES=${NLIVES} SCENARIO=${SCENARIO}" +echo "NVODS=${NVODS} NLIVES=${NLIVES} SCENARIO=${SCENARIO} PLATFORM=${PLATFORM}" for template in $(find "${DIR}" -maxdepth 1 -name "*.yaml.m4" -print); do - m4 -DNVODS=${NVODS} -DNLIVES=${NLIVES} -DSCENARIO=${SCENARIO} -DHOSTIP=${HOSTIP} -DREGISTRY_PREFIX=${REGISTRY} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}" "${template}" > "${template/.m4/}" + m4 -DNVODS=${NVODS} -DNLIVES=${NLIVES} -DSCENARIO=${SCENARIO} -DPLATFORM=${PLATFORM} -DHOSTIP=${HOSTIP} -DREGISTRY_PREFIX=${REGISTRY} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}" "${template}" > "${template/.m4/}" done diff --git a/deployment/kubernetes/yaml/live-deploy.yaml.m4 b/deployment/kubernetes/yaml/live-deploy.yaml.m4 index cedc44f..5467646 100644 --- a/deployment/kubernetes/yaml/live-deploy.yaml.m4 +++ b/deployment/kubernetes/yaml/live-deploy.yaml.m4 @@ -23,7 +23,7 @@ spec: enableServiceLinks: false containers: - name: live-defn(`LIDX') - image: defn(`REGISTRY_PREFIX')ovc_software_transcode_service:latest + image: defn(`REGISTRY_PREFIX')`ovc_transcode_'defn(`PLATFORM_SUFFIX'):latest imagePullPolicy: IfNotPresent resources: limits: diff --git a/deployment/kubernetes/yaml/start.sh b/deployment/kubernetes/yaml/start.sh index 56bb9e3..bc45a49 100755 --- a/deployment/kubernetes/yaml/start.sh +++ b/deployment/kubernetes/yaml/start.sh @@ -4,7 +4,8 @@ DIR=$(dirname $(readlink -f "$0")) NVODS="${1:-1}" NLIVES="${2:-1}" SCENARIO="${3:-cdn}" -REGISTRY="$4" +PLATFORM="${4:-Xeon}" +REGISTRY="$5" # Set Bash color ECHO_PREFIX_INFO="\033[1;32;40mINFO...\033[0;0m" diff --git a/deployment/kubernetes/yaml/vod-deploy.yaml.m4 b/deployment/kubernetes/yaml/vod-deploy.yaml.m4 index e131cad..3e9922d 100644 --- a/deployment/kubernetes/yaml/vod-deploy.yaml.m4 +++ b/deployment/kubernetes/yaml/vod-deploy.yaml.m4 @@ -20,7 +20,7 @@ spec: enableServiceLinks: false containers: - name: vod - image: 
defn(`REGISTRY_PREFIX')ovc_software_transcode_service:latest + image: defn(`REGISTRY_PREFIX')`ovc_transcode_'defn(`PLATFORM_SUFFIX'):latest imagePullPolicy: IfNotPresent resources: limits: diff --git a/script/build.sh b/script/build.sh index 7a573ff..e34f339 100644 --- a/script/build.sh +++ b/script/build.sh @@ -5,24 +5,34 @@ if test -z "${DIR}"; then exit -1 fi -REGISTRY="$4" +PLATFORM="${4:-Xeon}" +REGISTRY="$5" -# build image(s) in order (to satisfy dependencies) -for dep in .8 .7 .6 .5 .4 .3 .2 .1 ''; do - if test -f "${DIR}/Dockerfile$dep"; then - image=$(grep -m1 '#' "$DIR/Dockerfile$dep" | cut -d' ' -f2) - if test -z "$dep"; then image="$IMAGE"; fi - - if grep -q 'AS build' "${DIR}/Dockerfile$dep"; then - docker build --network=host --file="${DIR}/Dockerfile$dep" --target build -t "$image:build" "$DIR" $(env | grep -E '_(proxy|REPO|VER)=' | sed 's/^/--build-arg /') --build-arg UID=$(id -u) --build-arg GID=$(id -g) - fi - - docker build --network=host --file="${DIR}/Dockerfile$dep" -t "$image:latest" "$DIR" $(env | grep -E '_(proxy|REPO|VER)=' | sed 's/^/--build-arg /') --build-arg UID=$(id -u) --build-arg GID=$(id -g) +build_docker() { + docker_file="$1" + shift + image_name="$1" + shift + if test -f "$docker_file.m4"; then + m4 -I "$(dirname $docker_file)" "$docker_file.m4" > "$docker_file" + fi + (cd "$DIR"; docker build --network host --file="$docker_file" "$@" -t "$image_name" "$DIR" $(env | cut -f1 -d= | grep -E '_(proxy|REPO|VER)$' | sed 's/^/--build-arg /') --build-arg UID=$(id -u) --build-arg GID=$(id -g)) - # if REGISTRY is specified, push image to the private registry - if [ -n "$REGISTRY" ]; then - docker tag "$image:latest" "$REGISTRY$image:latest" - docker push "$REGISTRY$image:latest" - fi + # if REGISTRY is specified, push image to the private registry + if [ -n "$REGISTRY" ]; then + docker tag "$image_name" "$REGISTRY$image_name" + docker push "$REGISTRY$image_name" fi +} + +# build image(s) in order (to satisfy dependencies) +#for dep in .8 
.7 .6 .5 .4 .3 .2 .1 ''; do +for dep in '.5.*' '.4.*' '.3.*' '.2.*' '.1.*' '.0.*' ''; do + dirs=("$DIR/$PLATFORM" "$DIR") + for dockerfile in $(find "${dirs[@]}" -maxdepth 1 -name "Dockerfile$dep" -print 2>/dev/null); do + echo ${dirs[@]} + image=$(head -n 1 "$dockerfile" | grep '# ' | cut -d' ' -f2) + if test -z "$image"; then image="$IMAGE"; fi + build_docker "$dockerfile" "$image" + done done diff --git a/script/deployment.cmake b/script/deployment.cmake index fb686fe..21a2477 100644 --- a/script/deployment.cmake +++ b/script/deployment.cmake @@ -1,2 +1,2 @@ -add_custom_target(start_${service} "${CMAKE_CURRENT_SOURCE_DIR}/start.sh" "${service}" "${NVODS}" "${NLIVES}" "${SCENARIO}" "${REGISTRY}") +add_custom_target(start_${service} "${CMAKE_CURRENT_SOURCE_DIR}/start.sh" "${service}" "${NVODS}" "${NLIVES}" "${SCENARIO}" "${PLATFORM}" "${REGISTRY}") add_custom_target(stop_${service} "${CMAKE_CURRENT_SOURCE_DIR}/stop.sh" "${service}") diff --git a/script/service.cmake b/script/service.cmake index 49973b6..b9daeea 100644 --- a/script/service.cmake +++ b/script/service.cmake @@ -1,3 +1,3 @@ if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/build.sh") - add_custom_target(build_${service} ALL "${CMAKE_CURRENT_SOURCE_DIR}/build.sh" "${NVODS}" "${NLIVES}" "${SCENARIO}" "${REGISTRY}") + add_custom_target(build_${service} ALL "${CMAKE_CURRENT_SOURCE_DIR}/build.sh" "${NVODS}" "${NLIVES}" "${SCENARIO}" "${PLATFORM}" "${REGISTRY}") endif() diff --git a/xcode-server/CMakeLists.txt b/xcode-server/CMakeLists.txt index 162c01f..bddb21a 100644 --- a/xcode-server/CMakeLists.txt +++ b/xcode-server/CMakeLists.txt @@ -1 +1,3 @@ -include("${CMAKE_SOURCE_DIR}/script/scan-all.cmake") +set(service "ovc_transcode_service") +include("${CMAKE_SOURCE_DIR}/script/service.cmake") +add_dependencies(build_${service} build_ovc_cdn_common) diff --git a/xcode-server/software/Dockerfile b/xcode-server/Xeon/Dockerfile similarity index 90% rename from xcode-server/software/Dockerfile rename to 
xcode-server/Xeon/Dockerfile index b60aac2..70047b5 100644 --- a/xcode-server/software/Dockerfile +++ b/xcode-server/Xeon/Dockerfile @@ -1,6 +1,9 @@ +# ovc_transcode_xeon FROM openvisualcloud/xeon-ubuntu1804-media-ffmpeg:20.7 RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends python3-tornado python3-kafka python3-kazoo && rm -rf /var/lib/apt/lists/* + +COPY --from=ovc_cdn_common /home/ /home/ COPY *.py /home/ CMD ["/home/main.py"] WORKDIR /home diff --git a/xcode-server/hardware/Dockerfile b/xcode-server/Xeone3/Dockerfile similarity index 90% rename from xcode-server/hardware/Dockerfile rename to xcode-server/Xeone3/Dockerfile index db2336c..375d0cb 100644 --- a/xcode-server/hardware/Dockerfile +++ b/xcode-server/Xeone3/Dockerfile @@ -1,8 +1,10 @@ +# ovc_transcode_xeone3 FROM openvisualcloud/xeone3-ubuntu1804-media-ffmpeg:20.7 RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends python3-tornado python3-kafka python3-kazoo && rm -rf /var/lib/apt/lists/* +COPY --from=ovc_cdn_common /home/ /home/ COPY *.py /home/ CMD ["/home/main.py"] WORKDIR /home diff --git a/xcode-server/build.sh b/xcode-server/build.sh new file mode 100755 index 0000000..0121dd9 --- /dev/null +++ b/xcode-server/build.sh @@ -0,0 +1,5 @@ +#!/bin/bash -e + +DIR=$(dirname $(readlink -f "$0")) + +. 
"${DIR}/../script/build.sh" diff --git a/xcode-server/hardware/CMakeLists.txt b/xcode-server/hardware/CMakeLists.txt deleted file mode 100644 index 5c5eb27..0000000 --- a/xcode-server/hardware/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -set(service "ovc_hardware_transcode_service") -include("${CMAKE_SOURCE_DIR}/script/service.cmake") diff --git a/xcode-server/hardware/abr_hls_dash.py b/xcode-server/hardware/abr_hls_dash.py deleted file mode 100755 index 0e7a1b1..0000000 --- a/xcode-server/hardware/abr_hls_dash.py +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/python3 - -import subprocess -import json - -RENDITIONS_SAMPLE = ( - # resolution bitrate(kbps) audio-rate(kbps) - [3840, 2160, 14000000, 192000], - [2560, 1440, 10000000, 192000], - [1920, 1080, 5000000, 192000], - [1280, 720, 2800000, 192000], - [842, 480, 1400000, 128000], - [640, 360, 800000, 128000] -) - -def to_kps(bitrate): - return str(int(bitrate/1000))+"k" - -def GetABRCommand(in_file, target, streaming_type, renditions=RENDITIONS_SAMPLE, duration=2, - segment_num=0,loop=0): - ffprobe_cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", - "-show_streams", in_file] - - process_id = subprocess.Popen(ffprobe_cmd, stdout=subprocess.PIPE) - # the `multiprocessing.Process` process will wait until - # the call to the `subprocess.Popen` object is completed - process_id.wait() - clip_info = json.loads(process_id.stdout.read().decode("utf-8")) - - keyframe_interval = 0 - frame_height = 0 - clip_v_duration = 0 - clip_a_duration = 0 - - segment_target_duration = duration # try to create a new segment every X seconds - max_bitrate_ratio = 1.07 # maximum accepted bitrate fluctuations - rate_monitor_buffer_ratio = 1.5 # maximum buffer size between bitrate conformance checks - - for item in clip_info["streams"]: - if item["codec_type"] == "video": - keyframe_interval = int(eval(item["avg_frame_rate"])+0.5) - frame_height = item["height"] - clip_v_duration = eval(item["duration"]) - if item["codec_type"] == 
"audio": - clip_a_duration = eval(item["duration"]) - - if segment_num != 0: - segment_duration = (int)((clip_v_duration+2.0)/segment_num) - if segment_duration < segment_target_duration: - segment_target_duration = segment_duration - - cmd = [] - cmd_abr = [] - if loop: - cmd_base = ["ffmpeg", "-hide_banner", "-y", "-stream_loop", "0", "-i", in_file] - else: - cmd_base = ["ffmpeg", "-hide_banner", "-y", "-i", in_file] - - cmd_static = ["-c:v", "libx264", "-profile:v", "main", "-sc_threshold", "0", "-strict", "-2"] - cmd_static += ["-g", str(keyframe_interval), "-keyint_min", str(keyframe_interval)] - cmd_dash = ["-use_timeline", "1", "-use_template", "1", "-seg_duration", - str(segment_target_duration), "-adaptation_sets", "id=0,streams=v"] - cmd_hls = ["-hls_time", str(segment_target_duration), "-hls_list_size", "0"] - cmd_fade_in_out = ["-an"] - - master_playlist = "#EXTM3U" + "\n" + "#EXT-X-VERSION:3" +"\n" + "#" + "\n" - - count = 0 - default_threshold = 4 - - for item in renditions: - width = item[0] - height = item[1] - v_bitrate = to_kps(item[2]) - a_bitrate = to_kps(item[3]) - maxrate = to_kps(item[2] * max_bitrate_ratio) - bufsize = to_kps(item[2] * rate_monitor_buffer_ratio) - name = str(height) + "p" - - if frame_height < height: - continue - - cmd_1 = [] - cmd_2 = [] - cmd_3 = [] - cmd_4 = [] - - if streaming_type == "hls": - cmd_1 = ["-vf", "scale=w="+str(width)+":"+"h="+str(height)+":"+"force_original_aspect_ratio=decrease" - +","+ "pad=w="+str(width)+":"+"h="+str(height)+":"+"x=(ow-iw)/2"+":"+"y=(oh-ih)/2"] - cmd_2 = ["-b:v", v_bitrate, "-maxrate", maxrate, "-bufsize", bufsize] - cmd_3 = ["-f", streaming_type] - cmd_4 = ["-hls_segment_filename", target+"/"+name+"_"+"%03d.ts", target+"/"+name+".m3u8"] - master_playlist += "#EXT-X-STREAM-INF:BANDWIDTH="+str(item[2])+","+"RESOLUTION="+str(width)+"x"+str(height)+"\n"+name+".m3u8"+"\n" - cmd_abr += cmd_static + cmd_1 + cmd_2 + cmd_fade_in_out + cmd_3 + cmd_hls + cmd_4 - - if streaming_type == "dash": - 
cmd_1 = ["-map", "0:v", "-b:v"+":"+str(count), v_bitrate, "-s:v"+":"+str(count), str(width)+"x"+str(height), - "-maxrate"+":"+str(count), maxrate, "-bufsize"+":"+str(count), bufsize] - cmd_2 = ["-an"] - cmd_3 = ["-f", streaming_type] - cmd_4 = ["-init_seg_name", name+"-init-stream$RepresentationID$.m4s", "-media_seg_name", - name+"-chunk-stream$RepresentationID$-$Number%05d$.m4s", "-y", target+"/"+name+".mpd"] - if clip_a_duration == 0: - cmd_1 = ["-map", "0:v", "-b:v"+":"+str(count), v_bitrate, "-s:v"+":"+str(count), str(width)+"x"+str(height), - "-maxrate"+":"+str(count), maxrate, "-bufsize"+":"+str(count), bufsize] - cmd_2 = [] - cmd_abr += cmd_1 + cmd_2 - - count += 1 - if default_threshold < count: - break - - if streaming_type == "hls": - cmd = cmd_base + cmd_abr - elif streaming_type == "dash": - cmd = cmd_base + cmd_static + cmd_abr +["-f", "dash"] + cmd_dash + ["-y", target+"/"+"index.mpd"] - - #generate master m3u8 file - if streaming_type == "hls": - with open(target+"/"+"index.m3u8", "w", encoding='utf-8') as f: - f.write(master_playlist) - - return cmd diff --git a/xcode-server/hardware/build.sh b/xcode-server/hardware/build.sh deleted file mode 100755 index 43165c5..0000000 --- a/xcode-server/hardware/build.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -e - -IMAGE="ovc_hardware_transcode_service" -DIR=$(dirname $(readlink -f "$0")) - -. 
"${DIR}/../../script/build.sh" diff --git a/xcode-server/hardware/messaging.py b/xcode-server/hardware/messaging.py deleted file mode 100755 index f11184a..0000000 --- a/xcode-server/hardware/messaging.py +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/python3 - -import socket -from kafka import KafkaProducer, KafkaConsumer, TopicPartition -import traceback -import socket -import time - -KAFKA_HOSTS = ["kafka-service:9092"] - -class Producer(object): - def __init__(self): - super(Producer, self).__init__() - self._client_id = socket.gethostname() - self._producer = None - - def send(self, topic, message): - if not self._producer: - try: - self._producer = KafkaProducer(bootstrap_servers=KAFKA_HOSTS, - client_id=self._client_id, - api_version=(0, 10), acks=0) - except: - print(traceback.format_exc(), flush=True) - self._producer = None - - try: - self._producer.send(topic, message.encode('utf-8')) - except: - print(traceback.format_exc(), flush=True) - - def flush(self): - if self._producer: - self._producer.flush() - - def close(self): - if self._producer: - self._producer.close() - self._producer=None - -class Consumer(object): - def __init__(self, group=None): - super(Consumer, self).__init__() - self._client_id = socket.gethostname() - self._group = group - - def messages(self, topic, timeout=None): - c = KafkaConsumer(topic, bootstrap_servers=KAFKA_HOSTS, client_id=self._client_id, - group_id=self._group, auto_offset_reset="earliest", api_version=(0, 10)) - - for msg in c: - yield msg.value.decode('utf-8') - c.close() - - def debug(self, topic): - c = KafkaConsumer(bootstrap_servers=KAFKA_HOSTS, client_id=self._client_id, - group_id=None, api_version=(0, 10)) - - # assign/subscribe topic - partitions = c.partitions_for_topic(topic) - if not partitions: - raise Exception("Topic "+topic+" not exist") - c.assign([TopicPartition(topic, p) for p in partitions]) - - # seek to beginning if needed - c.seek_to_beginning() - - # fetch messages - while True: - partitions = 
c.poll(100) - if partitions: - for p in partitions: - for msg in partitions[p]: - yield msg.value.decode('utf-8') - yield "" - - c.close() diff --git a/xcode-server/hardware/zkstate.py b/xcode-server/hardware/zkstate.py deleted file mode 100755 index a8c5f43..0000000 --- a/xcode-server/hardware/zkstate.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/python3 - -from kazoo.client import KazooClient -from kazoo.exceptions import NoNodeError, NodeExistsError -from kazoo.protocol.states import KazooState -import traceback -import time - -ZK_HOSTS = 'zookeeper-service:2181' - -class ZKState(object): - def __init__(self, path, name=None): - super(ZKState, self).__init__() - options={"max_tries":-1, "max_delay":5, "ignore_expire":True} - self._zk = KazooClient(hosts=ZK_HOSTS, connection_retry=options) - try: - self._zk.start(timeout=3600) - except: - print(traceback.format_exc(), flush=True) - self._path = path - self._name="" if name is None else name+"." - self._zk.ensure_path(path) - - def processed(self): - return self._zk.exists(self._path+"/"+self._name+"complete") - - def process_start(self): - if self.processed(): - return False - try: - self._zk.create(self._path+"/"+self._name+"processing", ephemeral=True) - return True - except NodeExistsError: # another process wins - return False - - def process_end(self): - try: - self._zk.create(self._path+"/"+self._name+"complete") - except NodeExistsError: - pass - - def process_abort(self): - # the ephemeral node will be deleted upon close - pass - - def close(self): - self._zk.stop() - self._zk.close() diff --git a/xcode-server/hardware/main.py b/xcode-server/main.py similarity index 100% rename from xcode-server/hardware/main.py rename to xcode-server/main.py diff --git a/xcode-server/hardware/shell.sh b/xcode-server/shell.sh similarity index 76% rename from xcode-server/hardware/shell.sh rename to xcode-server/shell.sh index 9dc61e0..46d9228 100755 --- a/xcode-server/hardware/shell.sh +++ b/xcode-server/shell.sh @@ -1,7 
+1,6 @@ #!/bin/bash -e -IMAGE="ovc_hardware_transcode_service" DIR=$(dirname $(readlink -f "$0")) OPTIONS=("--volume=${DIR}/../volume/video/archive:/var/www/archive:ro" "--volume=${DIR}/../volume/video/dash:/var/www/dash:rw" "--volume=${DIR}/../volume/video/hls:/var/www/hls:rw") -. "${DIR}/../../script/shell.sh" +. "${DIR}/../script/shell.sh" diff --git a/xcode-server/software/CMakeLists.txt b/xcode-server/software/CMakeLists.txt deleted file mode 100644 index c2b50c6..0000000 --- a/xcode-server/software/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -set(service "ovc_software_transcode_service") -include("${CMAKE_SOURCE_DIR}/script/service.cmake") diff --git a/xcode-server/software/abr_hls_dash.py b/xcode-server/software/abr_hls_dash.py deleted file mode 100755 index 0e7a1b1..0000000 --- a/xcode-server/software/abr_hls_dash.py +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/python3 - -import subprocess -import json - -RENDITIONS_SAMPLE = ( - # resolution bitrate(kbps) audio-rate(kbps) - [3840, 2160, 14000000, 192000], - [2560, 1440, 10000000, 192000], - [1920, 1080, 5000000, 192000], - [1280, 720, 2800000, 192000], - [842, 480, 1400000, 128000], - [640, 360, 800000, 128000] -) - -def to_kps(bitrate): - return str(int(bitrate/1000))+"k" - -def GetABRCommand(in_file, target, streaming_type, renditions=RENDITIONS_SAMPLE, duration=2, - segment_num=0,loop=0): - ffprobe_cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", - "-show_streams", in_file] - - process_id = subprocess.Popen(ffprobe_cmd, stdout=subprocess.PIPE) - # the `multiprocessing.Process` process will wait until - # the call to the `subprocess.Popen` object is completed - process_id.wait() - clip_info = json.loads(process_id.stdout.read().decode("utf-8")) - - keyframe_interval = 0 - frame_height = 0 - clip_v_duration = 0 - clip_a_duration = 0 - - segment_target_duration = duration # try to create a new segment every X seconds - max_bitrate_ratio = 1.07 # maximum accepted bitrate fluctuations - 
rate_monitor_buffer_ratio = 1.5 # maximum buffer size between bitrate conformance checks - - for item in clip_info["streams"]: - if item["codec_type"] == "video": - keyframe_interval = int(eval(item["avg_frame_rate"])+0.5) - frame_height = item["height"] - clip_v_duration = eval(item["duration"]) - if item["codec_type"] == "audio": - clip_a_duration = eval(item["duration"]) - - if segment_num != 0: - segment_duration = (int)((clip_v_duration+2.0)/segment_num) - if segment_duration < segment_target_duration: - segment_target_duration = segment_duration - - cmd = [] - cmd_abr = [] - if loop: - cmd_base = ["ffmpeg", "-hide_banner", "-y", "-stream_loop", "0", "-i", in_file] - else: - cmd_base = ["ffmpeg", "-hide_banner", "-y", "-i", in_file] - - cmd_static = ["-c:v", "libx264", "-profile:v", "main", "-sc_threshold", "0", "-strict", "-2"] - cmd_static += ["-g", str(keyframe_interval), "-keyint_min", str(keyframe_interval)] - cmd_dash = ["-use_timeline", "1", "-use_template", "1", "-seg_duration", - str(segment_target_duration), "-adaptation_sets", "id=0,streams=v"] - cmd_hls = ["-hls_time", str(segment_target_duration), "-hls_list_size", "0"] - cmd_fade_in_out = ["-an"] - - master_playlist = "#EXTM3U" + "\n" + "#EXT-X-VERSION:3" +"\n" + "#" + "\n" - - count = 0 - default_threshold = 4 - - for item in renditions: - width = item[0] - height = item[1] - v_bitrate = to_kps(item[2]) - a_bitrate = to_kps(item[3]) - maxrate = to_kps(item[2] * max_bitrate_ratio) - bufsize = to_kps(item[2] * rate_monitor_buffer_ratio) - name = str(height) + "p" - - if frame_height < height: - continue - - cmd_1 = [] - cmd_2 = [] - cmd_3 = [] - cmd_4 = [] - - if streaming_type == "hls": - cmd_1 = ["-vf", "scale=w="+str(width)+":"+"h="+str(height)+":"+"force_original_aspect_ratio=decrease" - +","+ "pad=w="+str(width)+":"+"h="+str(height)+":"+"x=(ow-iw)/2"+":"+"y=(oh-ih)/2"] - cmd_2 = ["-b:v", v_bitrate, "-maxrate", maxrate, "-bufsize", bufsize] - cmd_3 = ["-f", streaming_type] - cmd_4 = 
["-hls_segment_filename", target+"/"+name+"_"+"%03d.ts", target+"/"+name+".m3u8"] - master_playlist += "#EXT-X-STREAM-INF:BANDWIDTH="+str(item[2])+","+"RESOLUTION="+str(width)+"x"+str(height)+"\n"+name+".m3u8"+"\n" - cmd_abr += cmd_static + cmd_1 + cmd_2 + cmd_fade_in_out + cmd_3 + cmd_hls + cmd_4 - - if streaming_type == "dash": - cmd_1 = ["-map", "0:v", "-b:v"+":"+str(count), v_bitrate, "-s:v"+":"+str(count), str(width)+"x"+str(height), - "-maxrate"+":"+str(count), maxrate, "-bufsize"+":"+str(count), bufsize] - cmd_2 = ["-an"] - cmd_3 = ["-f", streaming_type] - cmd_4 = ["-init_seg_name", name+"-init-stream$RepresentationID$.m4s", "-media_seg_name", - name+"-chunk-stream$RepresentationID$-$Number%05d$.m4s", "-y", target+"/"+name+".mpd"] - if clip_a_duration == 0: - cmd_1 = ["-map", "0:v", "-b:v"+":"+str(count), v_bitrate, "-s:v"+":"+str(count), str(width)+"x"+str(height), - "-maxrate"+":"+str(count), maxrate, "-bufsize"+":"+str(count), bufsize] - cmd_2 = [] - cmd_abr += cmd_1 + cmd_2 - - count += 1 - if default_threshold < count: - break - - if streaming_type == "hls": - cmd = cmd_base + cmd_abr - elif streaming_type == "dash": - cmd = cmd_base + cmd_static + cmd_abr +["-f", "dash"] + cmd_dash + ["-y", target+"/"+"index.mpd"] - - #generate master m3u8 file - if streaming_type == "hls": - with open(target+"/"+"index.m3u8", "w", encoding='utf-8') as f: - f.write(master_playlist) - - return cmd diff --git a/xcode-server/software/build.sh b/xcode-server/software/build.sh deleted file mode 100755 index 2b83d0d..0000000 --- a/xcode-server/software/build.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -e - -IMAGE="ovc_software_transcode_service" -DIR=$(dirname $(readlink -f "$0")) - -. 
"${DIR}/../../script/build.sh" diff --git a/xcode-server/software/main.py b/xcode-server/software/main.py deleted file mode 100755 index d002116..0000000 --- a/xcode-server/software/main.py +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/python3 - -from os.path import isfile -from subprocess import call -from os import mkdir -from zkstate import ZKState -from messaging import Consumer -from abr_hls_dash import GetABRCommand -import traceback -import time -import json - -KAFKA_TOPIC_VODS = "content_provider_sched_vods" -KAFKA_GROUP = "content_provider_dash_hls_creator" - -ARCHIVE_ROOT = "/var/www/archive" -DASH_ROOT = "/var/www/video/dash" -HLS_ROOT = "/var/www/video/hls" - -def process_stream(msg): - stream_name=msg["name"] - stream_type=msg["type"] - stream_parameters=msg["parameters"] - loop= msg["loop"] - stream=stream_type+"/"+stream_name - - if not isfile(ARCHIVE_ROOT+"/"+stream_name): - return - - zk = ZKState("/content_provider_transcoder/"+ARCHIVE_ROOT+"/"+stream) - if zk.processed(): - zk.close() - return - - target_root=HLS_ROOT - if stream_type=="DASH": - target_root=DASH_ROOT - - try: - mkdir(target_root+"/"+stream_name) - except: - pass - - if zk.process_start(): - try: - if stream_parameters: - cmd = GetABRCommand(ARCHIVE_ROOT+"/"+stream_name, target_root+"/"+stream_name, stream_type,renditions=stream_parameters,loop=loop) - else: - cmd = GetABRCommand(ARCHIVE_ROOT+"/"+stream_name, target_root+"/"+stream_name, stream_type,loop=loop) - print(cmd, flush=True) - r = call(cmd) - if r: - raise Exception("status code: "+str(r)) - zk.process_end() - except: - print(traceback.format_exc(), flush=True) - zk.process_abort() - - zk.close() - -if __name__ == "__main__": - c = Consumer(KAFKA_GROUP) - while True: - try: - for message in c.messages(KAFKA_TOPIC_VODS): - process_stream(json.loads(message)) - except: - print(traceback.format_exc(), flush=True) - time.sleep(2) - c.close() diff --git a/xcode-server/software/messaging.py b/xcode-server/software/messaging.py 
deleted file mode 100755 index f11184a..0000000 --- a/xcode-server/software/messaging.py +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/python3 - -import socket -from kafka import KafkaProducer, KafkaConsumer, TopicPartition -import traceback -import socket -import time - -KAFKA_HOSTS = ["kafka-service:9092"] - -class Producer(object): - def __init__(self): - super(Producer, self).__init__() - self._client_id = socket.gethostname() - self._producer = None - - def send(self, topic, message): - if not self._producer: - try: - self._producer = KafkaProducer(bootstrap_servers=KAFKA_HOSTS, - client_id=self._client_id, - api_version=(0, 10), acks=0) - except: - print(traceback.format_exc(), flush=True) - self._producer = None - - try: - self._producer.send(topic, message.encode('utf-8')) - except: - print(traceback.format_exc(), flush=True) - - def flush(self): - if self._producer: - self._producer.flush() - - def close(self): - if self._producer: - self._producer.close() - self._producer=None - -class Consumer(object): - def __init__(self, group=None): - super(Consumer, self).__init__() - self._client_id = socket.gethostname() - self._group = group - - def messages(self, topic, timeout=None): - c = KafkaConsumer(topic, bootstrap_servers=KAFKA_HOSTS, client_id=self._client_id, - group_id=self._group, auto_offset_reset="earliest", api_version=(0, 10)) - - for msg in c: - yield msg.value.decode('utf-8') - c.close() - - def debug(self, topic): - c = KafkaConsumer(bootstrap_servers=KAFKA_HOSTS, client_id=self._client_id, - group_id=None, api_version=(0, 10)) - - # assign/subscribe topic - partitions = c.partitions_for_topic(topic) - if not partitions: - raise Exception("Topic "+topic+" not exist") - c.assign([TopicPartition(topic, p) for p in partitions]) - - # seek to beginning if needed - c.seek_to_beginning() - - # fetch messages - while True: - partitions = c.poll(100) - if partitions: - for p in partitions: - for msg in partitions[p]: - yield msg.value.decode('utf-8') - 
yield "" - - c.close() diff --git a/xcode-server/software/shell.sh b/xcode-server/software/shell.sh deleted file mode 100755 index 16024bf..0000000 --- a/xcode-server/software/shell.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -e - -IMAGE="ovc_software_transcode_service" -DIR=$(dirname $(readlink -f "$0")) -OPTIONS=("--volume=${DIR}/../volume/video/archive:/var/www/archive:ro" "--volume=${DIR}/../volume/video/dash:/var/www/dash:rw" "--volume=${DIR}/../volume/video/hls:/var/www/hls:rw") - -. "${DIR}/../../script/shell.sh" diff --git a/xcode-server/software/zkstate.py b/xcode-server/software/zkstate.py deleted file mode 100755 index a8c5f43..0000000 --- a/xcode-server/software/zkstate.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/python3 - -from kazoo.client import KazooClient -from kazoo.exceptions import NoNodeError, NodeExistsError -from kazoo.protocol.states import KazooState -import traceback -import time - -ZK_HOSTS = 'zookeeper-service:2181' - -class ZKState(object): - def __init__(self, path, name=None): - super(ZKState, self).__init__() - options={"max_tries":-1, "max_delay":5, "ignore_expire":True} - self._zk = KazooClient(hosts=ZK_HOSTS, connection_retry=options) - try: - self._zk.start(timeout=3600) - except: - print(traceback.format_exc(), flush=True) - self._path = path - self._name="" if name is None else name+"." 
- self._zk.ensure_path(path) - - def processed(self): - return self._zk.exists(self._path+"/"+self._name+"complete") - - def process_start(self): - if self.processed(): - return False - try: - self._zk.create(self._path+"/"+self._name+"processing", ephemeral=True) - return True - except NodeExistsError: # another process wins - return False - - def process_end(self): - try: - self._zk.create(self._path+"/"+self._name+"complete") - except NodeExistsError: - pass - - def process_abort(self): - # the ephemeral node will be deleted upon close - pass - - def close(self): - self._zk.stop() - self._zk.close() From 47368f4bedb07c1d61c52e71545a65f3cc8932ea Mon Sep 17 00:00:00 2001 From: Pengfei Qu Date: Mon, 21 Dec 2020 04:47:51 -0500 Subject: [PATCH 45/91] minor fix --- cdn-server/schedule.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cdn-server/schedule.py b/cdn-server/schedule.py index 2aa94e8..6717638 100755 --- a/cdn-server/schedule.py +++ b/cdn-server/schedule.py @@ -7,7 +7,7 @@ import json KAFKA_TOPIC_VODS = "content_provider_sched_vods" -DASHLS_ROOT = "/var/www" +DASHLS_ROOT = "/var/www/video" class ScheduleHandler(web.RequestHandler): @gen.coroutine From f7e592e7f6a6af09d2a55791e6223a122f05ddc9 Mon Sep 17 00:00:00 2001 From: Pengfei Qu Date: Mon, 21 Dec 2020 09:57:36 -0500 Subject: [PATCH 46/91] add the live streaming in batch mode --- batch/main.py | 24 +++++++++--------- batch/transcoding.json | 4 +-- cdn-server/schedule.py | 4 +-- common/abr_hls_dash.py | 40 ++++++++++++++++++++++++++++-- xcode-server/main.py | 56 +++++++++++++++++++++++++++++++++++++----- 5 files changed, 105 insertions(+), 23 deletions(-) diff --git a/batch/main.py b/batch/main.py index e05d85e..c1da68b 100755 --- a/batch/main.py +++ b/batch/main.py @@ -6,8 +6,7 @@ from os import listdir import json -KAFKA_TOPIC_VODS = "content_provider_sched_vods" -KAFKA_TOPIC_LIVES = "content_provider_sched_lives" +KAFKA_TOPIC = "content_provider_sched" DASHLS_ROOT = "/var/www" 
ARCHIVE_ROOT = "/var/www/archive" @@ -19,21 +18,24 @@ with open(config_file,"rt") as fd: info=json.load(fd) -print(info,flush=True) - producer = Producer() -for stream in info[0]["vods"]: +for idx,stream in enumerate(info[0]["vods"]): # schedule producing the stream if stream["name"] in streams: msg=stream + msg.update({"idx": idx}) print("start VOD transccoding on {} with {}: ".format(stream["name"],stream["type"]), flush=True) print(msg,flush=True) - producer.send(KAFKA_TOPIC_VODS, json.dumps(msg)) - # wait until file is available, return it - start_time = time.time() - while time.time() - start_time < 60: - if isfile(DASHLS_ROOT+"/"+stream["name"]): break - time.sleep(1) + producer.send(KAFKA_TOPIC, json.dumps(msg)) + +for idx,stream in enumerate(info[0]["lives"]): + # schedule producing the stream + if stream["name"] in streams: + msg=stream + msg.update({"idx": idx}) + print("start LIVE transccoding on {} with {}: ".format(stream["name"],stream["type"]), flush=True) + print(msg,flush=True) + producer.send(KAFKA_TOPIC, json.dumps(msg)) producer.close() diff --git a/batch/transcoding.json b/batch/transcoding.json index 7759134..29b8770 100644 --- a/batch/transcoding.json +++ b/batch/transcoding.json @@ -38,7 +38,7 @@ ], "codec": "AVC", "loop": 1, - "target": "rtmp", + "target": "rtmp://cdn-service/", "platform": "software" },{ "name": "bbb_sunflower_1080p_30fps_normal.mp4", @@ -48,7 +48,7 @@ ], "codec": "AVC", "loop": 1, - "target": "rtmp", + "target": "rtmp://cdn-service/", "platform": "software" }] }] diff --git a/cdn-server/schedule.py b/cdn-server/schedule.py index 6717638..2d6fb8d 100755 --- a/cdn-server/schedule.py +++ b/cdn-server/schedule.py @@ -6,7 +6,7 @@ import time import json -KAFKA_TOPIC_VODS = "content_provider_sched_vods" +KAFKA_TOPIC = "content_provider_sched" DASHLS_ROOT = "/var/www/video" class ScheduleHandler(web.RequestHandler): @@ -27,7 +27,7 @@ def get(self): "target": "file", "platform": "software" }) - producer.send(KAFKA_TOPIC_VODS, 
json.dumps(msg)) + producer.send(KAFKA_TOPIC, json.dumps(msg)) producer.close() # wait until file is available, return it diff --git a/common/abr_hls_dash.py b/common/abr_hls_dash.py index 0e7a1b1..37092ba 100755 --- a/common/abr_hls_dash.py +++ b/common/abr_hls_dash.py @@ -16,8 +16,7 @@ def to_kps(bitrate): return str(int(bitrate/1000))+"k" -def GetABRCommand(in_file, target, streaming_type, renditions=RENDITIONS_SAMPLE, duration=2, - segment_num=0,loop=0): +def probe_info(in_file): ffprobe_cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", "-show_streams", in_file] @@ -26,6 +25,11 @@ def GetABRCommand(in_file, target, streaming_type, renditions=RENDITIONS_SAMPLE, # the call to the `subprocess.Popen` object is completed process_id.wait() clip_info = json.loads(process_id.stdout.read().decode("utf-8")) + return clip_info + +def GetABRCommand(in_file, target, streaming_type, renditions=RENDITIONS_SAMPLE, duration=2, + segment_num=0,loop=0): + clip_info = probe_info(in_file) keyframe_interval = 0 frame_height = 0 @@ -122,3 +126,35 @@ def GetABRCommand(in_file, target, streaming_type, renditions=RENDITIONS_SAMPLE, f.write(master_playlist) return cmd + +def GetLiveCommand(in_file, target, codec_type="AVC", renditions=[[842, 480, 1400000, 128000]],loop=1): + codec = "libx264" + if codec_type == "HEVC": + codec="libsvt_hevc" + + max_bitrate_ratio = 1.07 # maximum accepted bitrate fluctuations + rate_monitor_buffer_ratio = 1.5 # maximum buffer size between bitrate conformance checks + + cmd = [] + cmd_base = [] + if loop: + cmd_base = ["ffmpeg", "-hide_banner", "-y", "-stream_loop", "0", "-i", in_file] + else: + cmd_base = ["ffmpeg", "-hide_banner", "-y", "-i", in_file] + + cmd_1=[] + for idx,item in enumerate(renditions): + width = item[0] + height = item[1] + v_bitrate = to_kps(item[2]) + a_bitrate = to_kps(item[3]) + maxrate = to_kps(item[2] * max_bitrate_ratio) + bufsize = to_kps(item[2] * rate_monitor_buffer_ratio) + name= target+str(idx) + + cmd_1 += ["-vf", 
"scale=w="+str(width)+":"+"h="+str(height),"-c:v", codec, "-b:v", v_bitrate, "-maxrate", maxrate, "-bufsize", bufsize] + cmd_1 += ["-r", "30","-g", "100", "-bf", "2", "-refs", "2", "-preset", "veryfast", "-forced-idr", "1", "-an", "-f", "flv", name] + + cmd = cmd_base + cmd_1 + ["-abr_pipeline"] + + return cmd diff --git a/xcode-server/main.py b/xcode-server/main.py index d002116..9b2cc2a 100755 --- a/xcode-server/main.py +++ b/xcode-server/main.py @@ -5,35 +5,36 @@ from os import mkdir from zkstate import ZKState from messaging import Consumer -from abr_hls_dash import GetABRCommand +from abr_hls_dash import GetABRCommand,GetLiveCommand import traceback import time import json -KAFKA_TOPIC_VODS = "content_provider_sched_vods" +KAFKA_TOPIC = "content_provider_sched" KAFKA_GROUP = "content_provider_dash_hls_creator" ARCHIVE_ROOT = "/var/www/archive" DASH_ROOT = "/var/www/video/dash" HLS_ROOT = "/var/www/video/hls" -def process_stream(msg): +def process_stream_vods(msg): stream_name=msg["name"] stream_type=msg["type"] stream_parameters=msg["parameters"] loop= msg["loop"] stream=stream_type+"/"+stream_name + print("VOD transcode:",stream , flush=True) if not isfile(ARCHIVE_ROOT+"/"+stream_name): return - zk = ZKState("/content_provider_transcoder/"+ARCHIVE_ROOT+"/"+stream) + zk = ZKState("/content_provider_transcoder/"+ARCHIVE_ROOT+"/vods/"+stream) if zk.processed(): zk.close() return target_root=HLS_ROOT - if stream_type=="DASH": + if stream_type=="dash": target_root=DASH_ROOT try: @@ -58,11 +59,54 @@ def process_stream(msg): zk.close() +def process_stream_lives(msg): + stream_name=msg["name"] + stream_type=msg["type"] + codec_type=msg["codec"] + stream_parameters=msg["parameters"] + target=msg["target"] + loop= msg["loop"] + idx=msg["idx"] + stream=stream_type+"/"+stream_name + + if not isfile(ARCHIVE_ROOT+"/"+stream_name): + return + + target_rtmp=target+stream_type +"/media_" + str(idx)+"_" + print("LIVE transcode:",target_rtmp , flush=True) + zk = 
ZKState("/content_provider_transcoder/"+ARCHIVE_ROOT+"/lives/"+stream) + if zk.processed(): + zk.close() + return + + if zk.process_start(): + try: + if stream_parameters: + cmd = GetLiveCommand(ARCHIVE_ROOT+"/"+stream_name, target_rtmp, codec_type,renditions=stream_parameters,loop=loop) + else: + cmd = GetLiveCommand(ARCHIVE_ROOT+"/"+stream_name, target_rtmp, codec_type,loop=loop) + print(cmd, flush=True) + r = call(cmd) + if r: + raise Exception("status code: "+str(r)) + zk.process_end() + except: + print(traceback.format_exc(), flush=True) + zk.process_abort() + + zk.close() + +def process_stream(msg): + if msg["target"] == "file": + process_stream_vods(msg) + else: + process_stream_lives(msg) + if __name__ == "__main__": c = Consumer(KAFKA_GROUP) while True: try: - for message in c.messages(KAFKA_TOPIC_VODS): + for message in c.messages(KAFKA_TOPIC): process_stream(json.loads(message)) except: print(traceback.format_exc(), flush=True) From d58e16307ac112ac7ce45bddb8b5b975195d343a Mon Sep 17 00:00:00 2001 From: Pengfei Qu Date: Mon, 21 Dec 2020 11:18:59 -0500 Subject: [PATCH 47/91] refine the swarm and helm --- deployment/docker-swarm/build.sh | 5 +++-- deployment/docker-swarm/docker-compose.yml.m4 | 12 +++++++++--- deployment/docker-swarm/platform.m4 | 1 + deployment/kubernetes/helm/build.sh | 5 +++-- .../templates/cdn-service-deployment.yaml | 4 +++- .../cdn-transcode/templates/cdn-service-service.yaml | 2 ++ .../templates/live-service-deployment.yaml | 6 +++++- .../templates/vod-service-deployment.yaml | 3 ++- .../templates/zookeeper-service-deployment.yaml | 2 +- .../kubernetes/helm/cdn-transcode/values.yaml.m4 | 6 ++++++ 10 files changed, 35 insertions(+), 11 deletions(-) create mode 100644 deployment/docker-swarm/platform.m4 diff --git a/deployment/docker-swarm/build.sh b/deployment/docker-swarm/build.sh index eb9447d..ba8cb8b 100755 --- a/deployment/docker-swarm/build.sh +++ b/deployment/docker-swarm/build.sh @@ -3,9 +3,10 @@ DIR=$(dirname $(readlink -f 
"$0")) NVODS="${1:-1}" SCENARIO="${3:-cdn}" -REGISTRY="$4" +PLATFORM="${4:-Xeon}" +REGISTRY="$5" rm -rf "$DIR/../../volume/video/cache" mkdir -p "$DIR/../../volume/video/cache/hls" "$DIR/../../volume/video/cache/dash" -m4 -DNVODS=${NVODS} -DSCENARIO=${SCENARIO} -DREGISTRY_PREFIX=${REGISTRY} -I "${DIR}" "${DIR}/docker-compose.yml.m4" > "${DIR}/docker-compose.yml" +m4 -DNVODS=${NVODS} -DSCENARIO=${SCENARIO} -DPLATFORM=${PLATFORM} -DREGISTRY_PREFIX=${REGISTRY} -I "${DIR}" "${DIR}/docker-compose.yml.m4" > "${DIR}/docker-compose.yml" diff --git a/deployment/docker-swarm/docker-compose.yml.m4 b/deployment/docker-swarm/docker-compose.yml.m4 index a229165..745497f 100644 --- a/deployment/docker-swarm/docker-compose.yml.m4 +++ b/deployment/docker-swarm/docker-compose.yml.m4 @@ -1,5 +1,6 @@ version: '3.1' +include(platform.m4) services: redis-service: @@ -53,9 +54,12 @@ services: replicas: 1 cdn-service: - image: defn(`REGISTRY_PREFIX')ovc_cdn_service:latest + image: defn(`REGISTRY_PREFIX')`ovc_'defn(`SCENARIO')_service:latest ports: +ifelse(defn(`SCENARIO'),`cdn',`dnl - "443:8443" +')dnl + - "1935:1935" volumes: - ${VIDEO_ARCHIVE_VOLUME}:/var/www/archive:rw - ${VIDEO_CACHE_VOLUME}:/var/www/video:rw @@ -76,7 +80,7 @@ services: mode: 0440 vod-transcode-service: - image: defn(`REGISTRY_PREFIX')ovc_software_transcode_service:latest + image: defn(`REGISTRY_PREFIX')`ovc_transcode_'defn(`PLATFORM_SUFFIX'):latest volumes: - ${VIDEO_ARCHIVE_VOLUME}:/var/www/archive:ro - ${VIDEO_CACHE_VOLUME}:/var/www/video:rw @@ -86,8 +90,9 @@ services: - kafka-service - zookeeper-service +ifelse(defn(`SCENARIO'),`cdn',`dnl live-transcode-service: - image: defn(`REGISTRY_PREFIX')ovc_software_transcode_service:latest + image: defn(`REGISTRY_PREFIX')`ovc_transcode_'defn(`PLATFORM_SUFFIX'):latest volumes: - ${VIDEO_ARCHIVE_VOLUME}:/var/www/archive:ro depends_on: @@ -96,6 +101,7 @@ services: no_proxy: "cdn-service" NO_PROXY: "cdn-service" command: 
["ffmpeg","-re","-stream_loop","-1","-i","/var/www/archive/bbb_sunflower_1080p_30fps_normal.mp4","-vf","scale=856:480","-c:v","libx264","-b:v","8000000","-forced-idr","1","-preset","veryfast","-an","-f","flv","rtmp://cdn-service/dash/media_0_0","-vf","scale=856:480","-c:v","libsvt_hevc","-b:v","8000000","-forced-idr","1","-preset","9","-an","-f","flv","rtmp://cdn-service/hls/media_0_0","-abr_pipeline"] +')dnl secrets: self_key: diff --git a/deployment/docker-swarm/platform.m4 b/deployment/docker-swarm/platform.m4 new file mode 100644 index 0000000..a1e3c03 --- /dev/null +++ b/deployment/docker-swarm/platform.m4 @@ -0,0 +1 @@ +define(`PLATFORM_SUFFIX',translit(defn(`PLATFORM'),`A-Z',`a-z'))dnl diff --git a/deployment/kubernetes/helm/build.sh b/deployment/kubernetes/helm/build.sh index 94b296e..8f95d45 100755 --- a/deployment/kubernetes/helm/build.sh +++ b/deployment/kubernetes/helm/build.sh @@ -4,7 +4,8 @@ DIR=$(dirname $(readlink -f "$0")) NVODS="${1:-1}" NLIVES="${2:-1}" SCENARIO="${3:-cdn}" -REGISTRY="$4" +PLATFORM="${4:-Xeon}" +REGISTRY="$5" HOSTIP=$(ip route get 8.8.8.8 | awk '/ src /{split(substr($0,index($0," src ")),f);print f[2];exit}') # make sure helm is functional @@ -12,5 +13,5 @@ helm version >/dev/null 2>/dev/null || exit 0 echo "Generating helm chart" . 
"${DIR}/../volume-info.sh" -m4 -DREGISTRY_PREFIX=${REGISTRY} -DNVODS=${NVODS} -DNLIVES=${NLIVES} -DSCENARIO=${SCENARIO} -DUSERID=$(id -u) -DGROUPID=$(id -g) -DHOSTIP=${HOSTIP} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}/cdn-transcode" "$DIR/cdn-transcode/values.yaml.m4" > "$DIR/cdn-transcode/values.yaml" +m4 -DREGISTRY_PREFIX=${REGISTRY} -DNVODS=${NVODS} -DNLIVES=${NLIVES} -DSCENARIO=${SCENARIO} -DPLATFORM=${PLATFORM} -DUSERID=$(id -u) -DGROUPID=$(id -g) -DHOSTIP=${HOSTIP} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}/cdn-transcode" "$DIR/cdn-transcode/values.yaml.m4" > "$DIR/cdn-transcode/values.yaml" diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml index 658d77d..242f947 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml @@ -20,11 +20,13 @@ spec: - bash - -c - /home/main.py&/usr/local/sbin/nginx - image: {{ $.Values.registryPrefix }}ovc_cdn_service:latest + image: {{ $.Values.registryPrefix }}ovc_{{ $.Values.scenario }}_service:latest imagePullPolicy: IfNotPresent name: cdn-service ports: +{{- if eq "cdn" $.Values.scenario }} - containerPort: 8443 +{{- end }} - containerPort: 1935 volumeMounts: - mountPath: /var/run/secrets diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-service.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-service.yaml index 5b2ffaa..6381b00 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-service.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-service.yaml @@ -6,9 +6,11 @@ metadata: name: cdn-service spec: ports: +{{- if eq "cdn" $.Values.scenario }} - name: "443" port: 443 targetPort: 8443 +{{- end }} - name: "1935" port: 1935 targetPort: 1935 diff --git 
a/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml index bfd8cba..fb41546 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml @@ -1,4 +1,6 @@ +{{- if eq "cdn" $.Values.scenario }} + {{- range $i,$v1 := .Values.liveTranscode.streams }} apiVersion: apps/v1 @@ -19,7 +21,7 @@ spec: app: live-service-{{ $i }} spec: containers: - - image: {{ $.Values.registryPrefix }}ovc_software_transcode_service:latest + - image: {{ $.Values.registryPrefix }}ovc_transcode_{{ lower $.Values.platform }}:latest imagePullPolicy: IfNotPresent command: ["/usr/local/bin/ffmpeg","-re","-stream_loop","-1", "-i","{{ .name }}", @@ -67,3 +69,5 @@ spec: --- {{- end }} +{{- end }} + diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml index d0cb5f1..f7a8ea6 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml @@ -1,3 +1,4 @@ + apiVersion: apps/v1 kind: Deployment metadata: @@ -20,7 +21,7 @@ spec: - bash - -c - /home/main.py - image: {{ $.Values.registryPrefix }}ovc_software_transcode_service:latest + image: {{ $.Values.registryPrefix }}ovc_transcode_{{ lower $.Values.platform }}:latest imagePullPolicy: IfNotPresent name: vod-service volumeMounts: diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-deployment.yaml index f17e857..5eaf16a 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-deployment.yaml +++ 
b/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-deployment.yaml @@ -31,7 +31,7 @@ spec: value: "1" - name: ZOOKEEPER_TICK_TIME value: "2000" - image: zookeeper:latest + image: zookeeper:3.5.6 imagePullPolicy: IfNotPresent name: zookeeper-service ports: diff --git a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 index fe950b4..bcb436d 100644 --- a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 +++ b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 @@ -1,6 +1,12 @@ registryPrefix: "defn(`REGISTRY_PREFIX')" +# platform specifies the target platform: Xeon or Xeone3. +platform: "defn(`PLATFORM')" + +# scenario specifies the mode: cdn or batch. +scenario: "defn(`SCENARIO')" + zookeeper: heapSize: 1024m From ee2a4096bacae8337cc0b0d38c8eec2aeb1cd113 Mon Sep 17 00:00:00 2001 From: xwu2git <3871873+xwu2git@users.noreply.github.com> Date: Tue, 12 Jan 2021 11:45:39 -0800 Subject: [PATCH 48/91] misc changes to support simple transcode --- CMakeLists.txt | 6 +- batch/CMakeLists.txt | 7 +- batch/Dockerfile | 15 +-- batch/build.sh | 2 +- batch/main.py | 29 ++--- batch/messaging.py | 77 ------------ batch/nginx.conf | 46 -------- batch/shell.sh | 2 +- batch/transcoding.json | 25 +--- cdn-server/CMakeLists.txt | 6 +- cdn-server/build.sh | 2 +- cdn-server/shell.sh | 2 +- common/CMakeLists.txt | 2 +- common/Dockerfile | 4 +- common/build.sh | 2 +- common/shell.sh | 2 +- content-provider/CMakeLists.txt | 2 +- content-provider/build.sh | 2 +- content-provider/shell.sh | 2 +- deployment/certificate/CMakeLists.txt | 2 +- deployment/certificate/build.sh | 2 +- deployment/certificate/shell.sh | 2 +- deployment/docker-swarm/.gitignore | 1 - deployment/docker-swarm/CMakeLists.txt | 3 - deployment/docker-swarm/build.sh | 12 -- deployment/docker-swarm/docker-compose.yml.m4 | 110 ------------------ deployment/docker-swarm/platform.m4 | 1 - deployment/docker-swarm/start.sh | 11 -- 
deployment/docker-swarm/stop.sh | 8 -- deployment/kubernetes/helm/build.sh | 2 +- .../helm/cdn-transcode/templates/batch.yaml | 28 +++++ .../templates/cdn-service-service.yaml | 20 ---- .../{cdn-service-deployment.yaml => cdn.yaml} | 30 ++++- .../templates/kafka-service-service.yaml | 13 --- ...fka-service-deployment.yaml => kafka.yaml} | 19 ++- ...live-service-deployment.yaml => live.yaml} | 2 +- .../templates/redis-service-service.yaml | 13 --- ...dis-service-deployment.yaml => redis.yaml} | 20 ++++ .../{vod-service-deployment.yaml => vod.yaml} | 2 +- .../templates/zookeeper-service-service.yaml | 13 --- ...service-deployment.yaml => zookeeper.yaml} | 17 +++ deployment/kubernetes/yaml/batch.yaml.m4 | 29 +++++ deployment/kubernetes/yaml/build.sh | 2 +- deployment/kubernetes/yaml/cdn-svc.yaml.m4 | 22 ---- .../yaml/{cdn-deploy.yaml.m4 => cdn.yaml.m4} | 30 ++++- deployment/kubernetes/yaml/kafka-svc.yaml.m4 | 12 -- .../{kafka-deploy.yaml.m4 => kafka.yaml.m4} | 17 ++- .../{live-deploy.yaml.m4 => live.yaml.m4} | 4 +- deployment/kubernetes/yaml/redis-svc.yaml.m4 | 13 --- .../{redis-deploy.yaml.m4 => redis.yaml.m4} | 17 ++- deployment/kubernetes/yaml/start.sh | 2 +- .../yaml/{vod-deploy.yaml.m4 => vod.yaml.m4} | 2 +- .../kubernetes/yaml/zookeeper-svc.yaml.m4 | 12 -- ...eeper-deploy.yaml.m4 => zookeeper.yaml.m4} | 15 +++ kafka/CMakeLists.txt | 2 +- kafka/build.sh | 2 +- kafka/shell.sh | 2 +- streaming-server/CMakeLists.txt | 6 +- streaming-server/build.sh | 2 +- streaming-server/shell.sh | 2 +- xcode-server/CMakeLists.txt | 4 +- xcode-server/Xeon/Dockerfile | 4 +- xcode-server/{Xeone3 => XeonE3}/Dockerfile | 4 +- 63 files changed, 276 insertions(+), 495 deletions(-) delete mode 100755 batch/messaging.py delete mode 100644 batch/nginx.conf delete mode 100644 deployment/docker-swarm/.gitignore delete mode 100644 deployment/docker-swarm/CMakeLists.txt delete mode 100755 deployment/docker-swarm/build.sh delete mode 100644 deployment/docker-swarm/docker-compose.yml.m4 delete 
mode 100644 deployment/docker-swarm/platform.m4 delete mode 100755 deployment/docker-swarm/start.sh delete mode 100755 deployment/docker-swarm/stop.sh create mode 100644 deployment/kubernetes/helm/cdn-transcode/templates/batch.yaml delete mode 100644 deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-service.yaml rename deployment/kubernetes/helm/cdn-transcode/templates/{cdn-service-deployment.yaml => cdn.yaml} (74%) delete mode 100644 deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-service.yaml rename deployment/kubernetes/helm/cdn-transcode/templates/{kafka-service-deployment.yaml => kafka.yaml} (86%) rename deployment/kubernetes/helm/cdn-transcode/templates/{live-service-deployment.yaml => live.yaml} (95%) delete mode 100644 deployment/kubernetes/helm/cdn-transcode/templates/redis-service-service.yaml rename deployment/kubernetes/helm/cdn-transcode/templates/{redis-service-deployment.yaml => redis.yaml} (68%) rename deployment/kubernetes/helm/cdn-transcode/templates/{vod-service-deployment.yaml => vod.yaml} (90%) delete mode 100644 deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-service.yaml rename deployment/kubernetes/helm/cdn-transcode/templates/{zookeeper-service-deployment.yaml => zookeeper.yaml} (83%) create mode 100644 deployment/kubernetes/yaml/batch.yaml.m4 delete mode 100644 deployment/kubernetes/yaml/cdn-svc.yaml.m4 rename deployment/kubernetes/yaml/{cdn-deploy.yaml.m4 => cdn.yaml.m4} (75%) delete mode 100644 deployment/kubernetes/yaml/kafka-svc.yaml.m4 rename deployment/kubernetes/yaml/{kafka-deploy.yaml.m4 => kafka.yaml.m4} (89%) rename deployment/kubernetes/yaml/{live-deploy.yaml.m4 => live.yaml.m4} (93%) delete mode 100644 deployment/kubernetes/yaml/redis-svc.yaml.m4 rename deployment/kubernetes/yaml/{redis-deploy.yaml.m4 => redis.yaml.m4} (79%) rename deployment/kubernetes/yaml/{vod-deploy.yaml.m4 => vod.yaml.m4} (93%) delete mode 100644 deployment/kubernetes/yaml/zookeeper-svc.yaml.m4 rename 
deployment/kubernetes/yaml/{zookeeper-deploy.yaml.m4 => zookeeper.yaml.m4} (86%) rename xcode-server/{Xeone3 => XeonE3}/Dockerfile (90%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9d19bae..885e007 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,6 +1,6 @@ cmake_minimum_required (VERSION 2.8) -Project(OVC NONE) +Project(CDNTranscodeSample NONE) if (NOT DEFINED REGISTRY) set(REGISTRY "") @@ -19,10 +19,10 @@ if (NOT DEFINED PLATFORM) endif() if (NOT DEFINED SCENARIO) - set(SCENARIO "cdn") + set(SCENARIO "transcode") endif() -if (SCENARIO STREQUAL "batch") +if (SCENARIO STREQUAL "transcode") set(NLIVES "0") endif() diff --git a/batch/CMakeLists.txt b/batch/CMakeLists.txt index 7dc4dc4..64b1000 100644 --- a/batch/CMakeLists.txt +++ b/batch/CMakeLists.txt @@ -1,2 +1,5 @@ -set(service "ovc_batch_service") -include("${CMAKE_SOURCE_DIR}/script/service.cmake") +if (SCENARIO STREQUAL "transcode") + set(service "tc_batch_service") + include("${CMAKE_SOURCE_DIR}/script/service.cmake") + add_dependencies(build_${service} build_tc_common) +endif() diff --git a/batch/Dockerfile b/batch/Dockerfile index 4698bc2..b7007d4 100644 --- a/batch/Dockerfile +++ b/batch/Dockerfile @@ -1,22 +1,15 @@ -FROM openvisualcloud/xeon-ubuntu1804-media-nginx:20.7 +FROM tc_common -Run DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends python3-setuptools python3-redis python-celery-common python3-tornado python3-kafka python3-kazoo openssh-server && rm -rf /var/lib/apt/lists/* +Run apt-get update -q && apt-get install -y -q python3-kafka python3-kazoo && rm -rf /var/lib/apt/lists/* COPY *.py /home/ COPY *.json /home/ -COPY *.conf /etc/nginx/ -CMD ["/bin/bash","-c","/home/main.py&/usr/local/sbin/nginx"] +CMD ["/bin/bash","-c","/home/main.py"] WORKDIR /home #### ARG UID -ARG GID -## must use ; here to ignore user exist status code -RUN [ ${GID} -gt 0 ] && groupadd -f -g ${GID} docker; \ - [ ${UID} -gt 0 ] && useradd -d /home/docker -g ${GID} -K 
UID_MAX=${UID} -K UID_MIN=${UID} docker; \ - touch /var/run/nginx.pid && \ - mkdir -p /var/log/nginx /var/lib/nginx /var/www/video /var/www/archive && \ - chown -R ${UID}:${GID} /var/run/nginx.pid /var/www /var/log/nginx /var/lib/nginx +RUN mkdir -p /var/www/archive USER ${UID} #### diff --git a/batch/build.sh b/batch/build.sh index f2c3532..c22e5f7 100755 --- a/batch/build.sh +++ b/batch/build.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -IMAGE="ovc_batch_service" +IMAGE="tc_batch_service" DIR=$(dirname $(readlink -f "$0")) . "${DIR}/../script/build.sh" diff --git a/batch/main.py b/batch/main.py index c1da68b..b112f56 100755 --- a/batch/main.py +++ b/batch/main.py @@ -1,13 +1,11 @@ #!/usr/bin/python3 -from os.path import isfile from messaging import Producer -import time from os import listdir +import time import json KAFKA_TOPIC = "content_provider_sched" -DASHLS_ROOT = "/var/www" ARCHIVE_ROOT = "/var/www/archive" config_file="/home/transcoding.json" @@ -19,27 +17,18 @@ info=json.load(fd) producer = Producer() -for idx,stream in enumerate(info[0]["vods"]): +for idx,stream in enumerate(info["vods"]): # schedule producing the stream if stream["name"] in streams: msg=stream msg.update({"idx": idx}) print("start VOD transccoding on {} with {}: ".format(stream["name"],stream["type"]), flush=True) print(msg,flush=True) - producer.send(KAFKA_TOPIC, json.dumps(msg)) - -for idx,stream in enumerate(info[0]["lives"]): - # schedule producing the stream - if stream["name"] in streams: - msg=stream - msg.update({"idx": idx}) - print("start LIVE transccoding on {} with {}: ".format(stream["name"],stream["type"]), flush=True) - print(msg,flush=True) - producer.send(KAFKA_TOPIC, json.dumps(msg)) - -producer.close() - -while True: - print("Running...",flush=True) - time.sleep(30) + while True: + try: + producer.send(KAFKA_TOPIC, json.dumps(msg)) + break + except Exception as e: + print("Exception: {}".format(e)) + time.sleep(5) diff --git a/batch/messaging.py b/batch/messaging.py deleted 
file mode 100755 index f11184a..0000000 --- a/batch/messaging.py +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/python3 - -import socket -from kafka import KafkaProducer, KafkaConsumer, TopicPartition -import traceback -import socket -import time - -KAFKA_HOSTS = ["kafka-service:9092"] - -class Producer(object): - def __init__(self): - super(Producer, self).__init__() - self._client_id = socket.gethostname() - self._producer = None - - def send(self, topic, message): - if not self._producer: - try: - self._producer = KafkaProducer(bootstrap_servers=KAFKA_HOSTS, - client_id=self._client_id, - api_version=(0, 10), acks=0) - except: - print(traceback.format_exc(), flush=True) - self._producer = None - - try: - self._producer.send(topic, message.encode('utf-8')) - except: - print(traceback.format_exc(), flush=True) - - def flush(self): - if self._producer: - self._producer.flush() - - def close(self): - if self._producer: - self._producer.close() - self._producer=None - -class Consumer(object): - def __init__(self, group=None): - super(Consumer, self).__init__() - self._client_id = socket.gethostname() - self._group = group - - def messages(self, topic, timeout=None): - c = KafkaConsumer(topic, bootstrap_servers=KAFKA_HOSTS, client_id=self._client_id, - group_id=self._group, auto_offset_reset="earliest", api_version=(0, 10)) - - for msg in c: - yield msg.value.decode('utf-8') - c.close() - - def debug(self, topic): - c = KafkaConsumer(bootstrap_servers=KAFKA_HOSTS, client_id=self._client_id, - group_id=None, api_version=(0, 10)) - - # assign/subscribe topic - partitions = c.partitions_for_topic(topic) - if not partitions: - raise Exception("Topic "+topic+" not exist") - c.assign([TopicPartition(topic, p) for p in partitions]) - - # seek to beginning if needed - c.seek_to_beginning() - - # fetch messages - while True: - partitions = c.poll(100) - if partitions: - for p in partitions: - for msg in partitions[p]: - yield msg.value.decode('utf-8') - yield "" - - c.close() diff 
--git a/batch/nginx.conf b/batch/nginx.conf deleted file mode 100644 index 2549836..0000000 --- a/batch/nginx.conf +++ /dev/null @@ -1,46 +0,0 @@ - -worker_processes auto; -worker_rlimit_nofile 8192; -daemon off; -error_log /var/www/log/error.log warn; - -events { - worker_connections 4096; -} - -rtmp { - server { - listen 1935; - chunk_size 4000; - - application stream { - live on; - } - - application hls { - live on; - hls on; - hls_path /var/www/video/hls; - hls_nested on; - hls_fragment 3; - hls_playlist_length 60; - hls_variant _low BANDWIDTH=2048000 RESOLUTION=854x480; - hls_variant _mid BANDWIDTH=4096000 RESOLUTION=1280x720; - hls_variant _hi BANDWIDTH=8192000 RESOLUTION=1920x1080; - } - - application dash { - live on; - dash on; - dash_path /var/www/video/dash; - dash_fragment 4; - dash_playlist_length 120; - dash_nested on; - dash_repetition on; - dash_cleanup on; - dash_variant _low bandwidth="2048000" width="854" height="480"; - dash_variant _med bandwidth="4096000" width="1280" height="720"; - dash_variant _hi bandwidth="8192000" width="1920" height="1080" max; - } - } -} diff --git a/batch/shell.sh b/batch/shell.sh index fcf6b39..054a825 100755 --- a/batch/shell.sh +++ b/batch/shell.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -IMAGE="ovc_batch_service" +IMAGE="tc_batch_service" DIR=$(dirname $(readlink -f "$0")) OPTIONS=("--volume=${DIR}/../../volume/video/archive:/var/www/archive:ro" "--volume=${DIR}/../../volume/video/dash:/var/www/dash:ro" "--volume=${DIR}/../../volume/video/hls:/var/www/hls:ro") diff --git a/batch/transcoding.json b/batch/transcoding.json index 29b8770..1721694 100644 --- a/batch/transcoding.json +++ b/batch/transcoding.json @@ -1,4 +1,4 @@ -[{ +{ "vods": [{ "name": "bbb_sunflower_1080p_30fps_normal.mp4", "type": "hls", @@ -29,26 +29,5 @@ "loop": 0, "target": "file", "platform": "software" - }], - "lives": [{ - "name": "bbb_sunflower_1080p_30fps_normal.mp4", - "type": "hls", - "parameters": [ - [1920, 1080, 5000000, 192000] - ], - "codec": 
"AVC", - "loop": 1, - "target": "rtmp://cdn-service/", - "platform": "software" - },{ - "name": "bbb_sunflower_1080p_30fps_normal.mp4", - "type": "dash", - "parameters": [ - [1920, 1080, 5000000, 192000] - ], - "codec": "AVC", - "loop": 1, - "target": "rtmp://cdn-service/", - "platform": "software" }] -}] +} diff --git a/cdn-server/CMakeLists.txt b/cdn-server/CMakeLists.txt index 0fd8b9e..f9d93fb 100644 --- a/cdn-server/CMakeLists.txt +++ b/cdn-server/CMakeLists.txt @@ -1,2 +1,4 @@ -set(service "ovc_cdn_service") -include("${CMAKE_SOURCE_DIR}/script/service.cmake") +if (SCENARIO STREQUAL "cdn") + set(service "tc_cdn_service") + include("${CMAKE_SOURCE_DIR}/script/service.cmake") +endif() diff --git a/cdn-server/build.sh b/cdn-server/build.sh index edf72d2..f3b188d 100755 --- a/cdn-server/build.sh +++ b/cdn-server/build.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -IMAGE="ovc_cdn_service" +IMAGE="tc_cdn_service" DIR=$(dirname $(readlink -f "$0")) . "${DIR}/../script/build.sh" diff --git a/cdn-server/shell.sh b/cdn-server/shell.sh index 777de8b..a7b0c74 100755 --- a/cdn-server/shell.sh +++ b/cdn-server/shell.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -IMAGE="ovc_cdn_service" +IMAGE="tc_cdn_service" DIR=$(dirname $(readlink -f "$0")) OPTIONS=("--volume=${DIR}/../../volume/video/archive:/var/www/archive:ro" "--volume=${DIR}/../../volume/video/dash:/var/www/dash:ro" "--volume=${DIR}/../../volume/video/hls:/var/www/hls:ro") diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index 4e41dcc..bef91ce 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -1,2 +1,2 @@ -set(service "ovc_cdn_common") +set(service "tc_common") include("${CMAKE_SOURCE_DIR}/script/service.cmake") diff --git a/common/Dockerfile b/common/Dockerfile index 5fb8a46..3a85b95 100644 --- a/common/Dockerfile +++ b/common/Dockerfile @@ -1,7 +1,7 @@ -FROM centos:7.6.1810 +FROM ubuntu:18.04 -RUN yum install -y -q epel-release && yum install -y -q python36-requests python36-ply python36-psutil && rm -rf 
/var/cache/yum/* +RUN apt-get update -q && apt-get install -y -q python3-requests python3-ply python3-psutil && rm -rf /var/lib/apt/lists/* COPY *.py /home/ ENV PYTHONIOENCODING=UTF-8 diff --git a/common/build.sh b/common/build.sh index d04672c..45ec0cd 100755 --- a/common/build.sh +++ b/common/build.sh @@ -1,5 +1,5 @@ #!/bin/bash -e -IMAGE="ovc_cdn_common" +IMAGE="tc_common" DIR=$(dirname $(readlink -f "$0")) . "$DIR/../script/build.sh" diff --git a/common/shell.sh b/common/shell.sh index c8489b0..633016d 100755 --- a/common/shell.sh +++ b/common/shell.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -IMAGE="ovc_cdn_common" +IMAGE="tc_common" DIR=$(dirname $(readlink -f "$0")) . "$DIR/../script/shell.sh" diff --git a/content-provider/CMakeLists.txt b/content-provider/CMakeLists.txt index e2e455e..8623124 100644 --- a/content-provider/CMakeLists.txt +++ b/content-provider/CMakeLists.txt @@ -1,2 +1,2 @@ -set(service "ovc_content_provider_archive") +set(service "tc_content_provider_archive") include("${CMAKE_SOURCE_DIR}/script/service.cmake") diff --git a/content-provider/build.sh b/content-provider/build.sh index a165774..9f697b0 100755 --- a/content-provider/build.sh +++ b/content-provider/build.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -IMAGE="ovc_content_provider_archive" +IMAGE="tc_content_provider_archive" DIR=$(dirname $(readlink -f "$0")) sample_video="http://distribution.bbb3d.renderfarming.net/video/mp4" clips=("$sample_video/bbb_sunflower_1080p_30fps_normal.mp4") diff --git a/content-provider/shell.sh b/content-provider/shell.sh index 07dbf94..fb46716 100755 --- a/content-provider/shell.sh +++ b/content-provider/shell.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -IMAGE="ovc_content_provider_archive" +IMAGE="tc_content_provider_archive" DIR=$(dirname $(readlink -f "$0")) OPTIONS=("--volume=$DIR/../volume/video:/mnt:rw" "--volume=$DIR:/home:ro") diff --git a/deployment/certificate/CMakeLists.txt b/deployment/certificate/CMakeLists.txt index ca21d02..418c4f2 100644 --- 
a/deployment/certificate/CMakeLists.txt +++ b/deployment/certificate/CMakeLists.txt @@ -1,2 +1,2 @@ -set(service "ovc_self_certificate") +set(service "tc_self_certificate") include("${CMAKE_SOURCE_DIR}/script/service.cmake") diff --git a/deployment/certificate/build.sh b/deployment/certificate/build.sh index e57c154..eed5005 100755 --- a/deployment/certificate/build.sh +++ b/deployment/certificate/build.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -IMAGE="ovc_self_certificate" +IMAGE="tc_self_certificate" DIR=$(dirname $(readlink -f "$0")) . "$DIR/../../script/build.sh" diff --git a/deployment/certificate/shell.sh b/deployment/certificate/shell.sh index 9674d40..c91244e 100755 --- a/deployment/certificate/shell.sh +++ b/deployment/certificate/shell.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -IMAGE="ssai_self_certificate" +IMAGE="tc_self_certificate" DIR=$(dirname $(readlink -f "$0")) . "$DIR/../../script/shell.sh" diff --git a/deployment/docker-swarm/.gitignore b/deployment/docker-swarm/.gitignore deleted file mode 100644 index 1120be9..0000000 --- a/deployment/docker-swarm/.gitignore +++ /dev/null @@ -1 +0,0 @@ -docker-compose.yml diff --git a/deployment/docker-swarm/CMakeLists.txt b/deployment/docker-swarm/CMakeLists.txt deleted file mode 100644 index 568397a..0000000 --- a/deployment/docker-swarm/CMakeLists.txt +++ /dev/null @@ -1,3 +0,0 @@ -set(service "docker_swarm") -include("${CMAKE_SOURCE_DIR}/script/service.cmake") -include("${CMAKE_SOURCE_DIR}/script/deployment.cmake") diff --git a/deployment/docker-swarm/build.sh b/deployment/docker-swarm/build.sh deleted file mode 100755 index ba8cb8b..0000000 --- a/deployment/docker-swarm/build.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -e - -DIR=$(dirname $(readlink -f "$0")) -NVODS="${1:-1}" -SCENARIO="${3:-cdn}" -PLATFORM="${4:-Xeon}" -REGISTRY="$5" - -rm -rf "$DIR/../../volume/video/cache" -mkdir -p "$DIR/../../volume/video/cache/hls" "$DIR/../../volume/video/cache/dash" - -m4 -DNVODS=${NVODS} -DSCENARIO=${SCENARIO} 
-DPLATFORM=${PLATFORM} -DREGISTRY_PREFIX=${REGISTRY} -I "${DIR}" "${DIR}/docker-compose.yml.m4" > "${DIR}/docker-compose.yml" diff --git a/deployment/docker-swarm/docker-compose.yml.m4 b/deployment/docker-swarm/docker-compose.yml.m4 deleted file mode 100644 index 745497f..0000000 --- a/deployment/docker-swarm/docker-compose.yml.m4 +++ /dev/null @@ -1,110 +0,0 @@ -version: '3.1' - -include(platform.m4) -services: - - redis-service: - image: redis:latest - restart: always - deploy: - replicas: 1 - user: redis - command: - redis-server - - zookeeper-service: - image: zookeeper:latest - environment: - ZOOKEEPER_SERVER_ID: 1 - ZOOKEEPER_CLIENT_PORT: '2181' - ZOOKEEPER_TICK_TIME: '2000' - ZOOKEEPER_HEAP_OPTS: '-Xmx2048m -Xms2048m' - ZOOKEEPER_MAX_CLIENT_CNXNS: '20000' - ZOOKEEPER_LOG4J_LOGGERS: 'zookeepr=ERROR' - ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: 'ERROR' - user: zookeeper - restart: always - deploy: - replicas: 1 - - kafka-service: - image: defn(`REGISTRY_PREFIX')ovc_kafka_service:latest - depends_on: - - zookeeper-service - environment: - KAFKA_BROKER_ID: 1 - KAFKA_ADVERTISED_HOST_NAME: 'kafka-service' - KAFKA_ADVERTISED_PORT: '9092' - KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-service:2181' - KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka-service:9092' - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'PLAINTEXT:PLAINTEXT' - KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT' - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_DEFAULT_REPLICATION_FACTOR: 1 - KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' - KAFKA_NUM_PARTITIONS: 16 - KAFKA_CREATE_TOPICS: 'content_provider_sched:16:1' - KAFKA_LOG_RETENTION_HOURS: 8 - KAFKA_HEAP_OPTS: '-Xmx1024m -Xms1024m' - KAFKA_LOG4J_LOGGERS: 'kafka=ERROR,kafka.controller=ERROR,state.change.logger=ERROR,org.apache.kafka=ERROR' - KAFKA_LOG4J_ROOT_LOGLEVEL: 'ERROR' - user: kafka - restart: always - deploy: - replicas: 1 - - cdn-service: - image: defn(`REGISTRY_PREFIX')`ovc_'defn(`SCENARIO')_service:latest - ports: -ifelse(defn(`SCENARIO'),`cdn',`dnl - - "443:8443" 
-')dnl - - "1935:1935" - volumes: - - ${VIDEO_ARCHIVE_VOLUME}:/var/www/archive:rw - - ${VIDEO_CACHE_VOLUME}:/var/www/video:rw - depends_on: - - kafka-service - deploy: - replicas: 1 - secrets: - - source: self_crt - target: /var/run/secrets/self.crt - uid: ${USER_ID} - gid: ${GROUP_ID} - mode: 0444 - - source: self_key - target: /var/run/secrets/self.key - uid: ${USER_ID} - gid: ${GROUP_ID} - mode: 0440 - - vod-transcode-service: - image: defn(`REGISTRY_PREFIX')`ovc_transcode_'defn(`PLATFORM_SUFFIX'):latest - volumes: - - ${VIDEO_ARCHIVE_VOLUME}:/var/www/archive:ro - - ${VIDEO_CACHE_VOLUME}:/var/www/video:rw - deploy: - replicas: defn(`NVODS') - depends_on: - - kafka-service - - zookeeper-service - -ifelse(defn(`SCENARIO'),`cdn',`dnl - live-transcode-service: - image: defn(`REGISTRY_PREFIX')`ovc_transcode_'defn(`PLATFORM_SUFFIX'):latest - volumes: - - ${VIDEO_ARCHIVE_VOLUME}:/var/www/archive:ro - depends_on: - - cdn-service - environment: - no_proxy: "cdn-service" - NO_PROXY: "cdn-service" - command: ["ffmpeg","-re","-stream_loop","-1","-i","/var/www/archive/bbb_sunflower_1080p_30fps_normal.mp4","-vf","scale=856:480","-c:v","libx264","-b:v","8000000","-forced-idr","1","-preset","veryfast","-an","-f","flv","rtmp://cdn-service/dash/media_0_0","-vf","scale=856:480","-c:v","libsvt_hevc","-b:v","8000000","-forced-idr","1","-preset","9","-an","-f","flv","rtmp://cdn-service/hls/media_0_0","-abr_pipeline"] -')dnl - -secrets: - self_key: - file: ${SECRETS_VOLUME}/self.key - self_crt: - file: ${SECRETS_VOLUME}/self.crt diff --git a/deployment/docker-swarm/platform.m4 b/deployment/docker-swarm/platform.m4 deleted file mode 100644 index a1e3c03..0000000 --- a/deployment/docker-swarm/platform.m4 +++ /dev/null @@ -1 +0,0 @@ -define(`PLATFORM_SUFFIX',translit(defn(`PLATFORM'),`A-Z',`a-z'))dnl diff --git a/deployment/docker-swarm/start.sh b/deployment/docker-swarm/start.sh deleted file mode 100755 index 2b9c929..0000000 --- a/deployment/docker-swarm/start.sh +++ /dev/null @@ -1,11 
+0,0 @@ -#!/bin/bash -e - -DIR=$(dirname $(readlink -f "$0")) -export VIDEO_ARCHIVE_VOLUME=$(readlink -f "$DIR/../../volume/video/archive") -export VIDEO_CACHE_VOLUME=$(readlink -f "$DIR/../../volume/video/cache") -export SECRETS_VOLUME=$(readlink -f "$DIR/../certificate") - -export USER_ID=$(id -u) -export GROUP_ID=$(id -g) -"$DIR/../certificate/self-sign.sh" -docker stack deploy -c "$DIR/docker-compose.yml" cdnt diff --git a/deployment/docker-swarm/stop.sh b/deployment/docker-swarm/stop.sh deleted file mode 100755 index c7ef1d3..0000000 --- a/deployment/docker-swarm/stop.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -e - -DIR=$(dirname $(readlink -f "$0")) - -yml="$DIR/docker-compose.$(hostname).yml" -test -f "$yml" || yml="$DIR/docker-compose.yml" - -docker stack rm cdnt diff --git a/deployment/kubernetes/helm/build.sh b/deployment/kubernetes/helm/build.sh index 8f95d45..fd3625b 100755 --- a/deployment/kubernetes/helm/build.sh +++ b/deployment/kubernetes/helm/build.sh @@ -3,7 +3,7 @@ DIR=$(dirname $(readlink -f "$0")) NVODS="${1:-1}" NLIVES="${2:-1}" -SCENARIO="${3:-cdn}" +SCENARIO="${3:-transcode}" PLATFORM="${4:-Xeon}" REGISTRY="$5" HOSTIP=$(ip route get 8.8.8.8 | awk '/ src /{split(substr($0,index($0," src ")),f);print f[2];exit}') diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/batch.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/batch.yaml new file mode 100644 index 0000000..bc22351 --- /dev/null +++ b/deployment/kubernetes/helm/cdn-transcode/templates/batch.yaml @@ -0,0 +1,28 @@ + +apiVersion: batch/v1 +kind: Job +metadata: + name: batch +spec: + template: + spec: + enableServiceLinks: false + containers: + - name: batch + image: {{ $.Values.registryPrefix }}tc_batch_service:latest + imagePullPolicy: IfNotPresent + env: + - name: NO_PROXY + value: "*" + - name: no_proxy + value: "*" + volumeMounts: + - mountPath: /var/www/archive + name: video-archive + readOnly: true + volumes: + - name: video-archive + persistentVolumeClaim: + 
claimName: video-archive + restartPolicy: Never + diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-service.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-service.yaml deleted file mode 100644 index 6381b00..0000000 --- a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-service.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - app: cdn-service - name: cdn-service -spec: - ports: -{{- if eq "cdn" $.Values.scenario }} - - name: "443" - port: 443 - targetPort: 8443 -{{- end }} - - name: "1935" - port: 1935 - targetPort: 1935 - externalIPs: - - "{{ .Values.cdn.hostIP }}" - selector: - app: cdn-service diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/cdn.yaml similarity index 74% rename from deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml rename to deployment/kubernetes/helm/cdn-transcode/templates/cdn.yaml index 242f947..d33954e 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/cdn-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/cdn.yaml @@ -1,3 +1,27 @@ + +{{- if eq "cdn" $.Values.scenario }} + +apiVersion: v1 +kind: Service +metadata: + labels: + app: cdn-service + name: cdn-service +spec: + ports: + - name: "443" + port: 443 + targetPort: 8443 + - name: "1935" + port: 1935 + targetPort: 1935 + externalIPs: + - "{{ .Values.cdn.hostIP }}" + selector: + app: cdn-service + +--- + apiVersion: apps/v1 kind: Deployment metadata: @@ -20,13 +44,11 @@ spec: - bash - -c - /home/main.py&/usr/local/sbin/nginx - image: {{ $.Values.registryPrefix }}ovc_{{ $.Values.scenario }}_service:latest + image: {{ $.Values.registryPrefix }}tc_{{ $.Values.scenario }}_service:latest imagePullPolicy: IfNotPresent name: cdn-service ports: -{{- if eq "cdn" $.Values.scenario }} - containerPort: 8443 -{{- end }} - 
containerPort: 1935 volumeMounts: - mountPath: /var/run/secrets @@ -47,3 +69,5 @@ spec: persistentVolumeClaim: claimName: video-cache restartPolicy: Always + +{{- end }} diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-service.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-service.yaml deleted file mode 100644 index 45940ca..0000000 --- a/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - app: kafka-service - name: kafka-service -spec: - ports: - - name: "9092" - port: 9092 - targetPort: 9092 - selector: - app: kafka-service diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/kafka.yaml similarity index 86% rename from deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-deployment.yaml rename to deployment/kubernetes/helm/cdn-transcode/templates/kafka.yaml index 6a46b1c..23cc6bb 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/kafka-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/kafka.yaml @@ -1,3 +1,20 @@ + +apiVersion: v1 +kind: Service +metadata: + labels: + app: kafka-service + name: kafka-service +spec: + ports: + - name: "9092" + port: 9092 + targetPort: 9092 + selector: + app: kafka-service + +--- + apiVersion: apps/v1 kind: Deployment metadata: @@ -48,7 +65,7 @@ spec: value: "1" - name: KAFKA_ZOOKEEPER_CONNECT value: zookeeper-service:2181 - image: {{ $.Values.registryPrefix }}ovc_kafka_service:latest + image: {{ $.Values.registryPrefix }}tc_kafka_service:latest imagePullPolicy: IfNotPresent name: kafka-service ports: diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/live.yaml similarity index 95% rename from 
deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml rename to deployment/kubernetes/helm/cdn-transcode/templates/live.yaml index fb41546..da7e2d6 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/live-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/live.yaml @@ -21,7 +21,7 @@ spec: app: live-service-{{ $i }} spec: containers: - - image: {{ $.Values.registryPrefix }}ovc_transcode_{{ lower $.Values.platform }}:latest + - image: {{ $.Values.registryPrefix }}tc_transcode_{{ lower $.Values.platform }}:latest imagePullPolicy: IfNotPresent command: ["/usr/local/bin/ffmpeg","-re","-stream_loop","-1", "-i","{{ .name }}", diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/redis-service-service.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/redis-service-service.yaml deleted file mode 100644 index 22bd6ad..0000000 --- a/deployment/kubernetes/helm/cdn-transcode/templates/redis-service-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - app: redis-service - name: redis-service -spec: - ports: - - name: "6379" - port: 6379 - targetPort: 6379 - selector: - app: redis-service diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/redis-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/redis.yaml similarity index 68% rename from deployment/kubernetes/helm/cdn-transcode/templates/redis-service-deployment.yaml rename to deployment/kubernetes/helm/cdn-transcode/templates/redis.yaml index 2377064..43ec5f9 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/redis-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/redis.yaml @@ -1,3 +1,21 @@ +{{- if eq "cdn" $.Values.scenario }} + +apiVersion: v1 +kind: Service +metadata: + labels: + app: redis-service + name: redis-service +spec: + ports: + - name: "6379" + port: 6379 + targetPort: 6379 + selector: + app: 
redis-service + +--- + apiVersion: apps/v1 kind: Deployment metadata: @@ -26,3 +44,5 @@ spec: securityContext: runAsUser: 999 restartPolicy: Always + +{{- end }} diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/vod.yaml similarity index 90% rename from deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml rename to deployment/kubernetes/helm/cdn-transcode/templates/vod.yaml index f7a8ea6..45710dc 100644 --- a/deployment/kubernetes/helm/cdn-transcode/templates/vod-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/vod.yaml @@ -21,7 +21,7 @@ spec: - bash - -c - /home/main.py - image: {{ $.Values.registryPrefix }}ovc_transcode_{{ lower $.Values.platform }}:latest + image: {{ $.Values.registryPrefix }}tc_transcode_{{ lower $.Values.platform }}:latest imagePullPolicy: IfNotPresent name: vod-service volumeMounts: diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-service.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-service.yaml deleted file mode 100644 index acdf70d..0000000 --- a/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - app: zookeeper-service - name: zookeeper-service -spec: - ports: - - name: "2181" - port: 2181 - targetPort: 2181 - selector: - app: zookeeper-service diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-deployment.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper.yaml similarity index 83% rename from deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-deployment.yaml rename to deployment/kubernetes/helm/cdn-transcode/templates/zookeeper.yaml index 5eaf16a..299ec7c 100644 --- 
a/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper-service-deployment.yaml +++ b/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper.yaml @@ -1,3 +1,20 @@ + +apiVersion: v1 +kind: Service +metadata: + labels: + app: zookeeper-service + name: zookeeper-service +spec: + ports: + - name: "2181" + port: 2181 + targetPort: 2181 + selector: + app: zookeeper-service + +--- + apiVersion: apps/v1 kind: Deployment metadata: diff --git a/deployment/kubernetes/yaml/batch.yaml.m4 b/deployment/kubernetes/yaml/batch.yaml.m4 new file mode 100644 index 0000000..5fe8a86 --- /dev/null +++ b/deployment/kubernetes/yaml/batch.yaml.m4 @@ -0,0 +1,29 @@ +include(platform.m4) + +apiVersion: batch/v1 +kind: Job +metadata: + name: batch +spec: + template: + spec: + enableServiceLinks: false + containers: + - name: batch + image: defn(`REGISTRY_PREFIX')tc_batch_service:latest + imagePullPolicy: IfNotPresent + env: + - name: NO_PROXY + value: "*" + - name: no_proxy + value: "*" + volumeMounts: + - mountPath: /var/www/archive + name: video-archive + readOnly: true + volumes: + - name: video-archive + persistentVolumeClaim: + claimName: video-archive + restartPolicy: Never +PLATFORM_NODE_SELECTOR(`Xeon')dnl diff --git a/deployment/kubernetes/yaml/build.sh b/deployment/kubernetes/yaml/build.sh index 04d02ee..7295e86 100755 --- a/deployment/kubernetes/yaml/build.sh +++ b/deployment/kubernetes/yaml/build.sh @@ -3,7 +3,7 @@ DIR=$(dirname $(readlink -f "$0")) NVODS="${1:-1}" NLIVES="${2:-1}" -SCENARIO="${3:-cdn}" +SCENARIO="${3:-transcode}" PLATFORM="${4:-Xeon}" REGISTRY="$5" HOSTIP=$(ip route get 8.8.8.8 | awk '/ src /{split(substr($0,index($0," src ")),f);print f[2];exit}') diff --git a/deployment/kubernetes/yaml/cdn-svc.yaml.m4 b/deployment/kubernetes/yaml/cdn-svc.yaml.m4 deleted file mode 100644 index 0cbe771..0000000 --- a/deployment/kubernetes/yaml/cdn-svc.yaml.m4 +++ /dev/null @@ -1,22 +0,0 @@ - -apiVersion: v1 -kind: Service -metadata: - name: cdn-service - labels: - app: 
cdn -spec: - ports: -ifelse(defn(`SCENARIO'),`cdn',`dnl - - port: 443 - targetPort: 8443 - name: https -')dnl - - port: 1935 - targetPort: 1935 - name: rtmp - externalIPs: - - defn(`HOSTIP') - selector: - app: cdn - diff --git a/deployment/kubernetes/yaml/cdn-deploy.yaml.m4 b/deployment/kubernetes/yaml/cdn.yaml.m4 similarity index 75% rename from deployment/kubernetes/yaml/cdn-deploy.yaml.m4 rename to deployment/kubernetes/yaml/cdn.yaml.m4 index a316b4f..b5878cf 100644 --- a/deployment/kubernetes/yaml/cdn-deploy.yaml.m4 +++ b/deployment/kubernetes/yaml/cdn.yaml.m4 @@ -1,6 +1,29 @@ include(platform.m4) include(configure.m4) +ifelse(defn(`SCENARIO'),`cdn',` +apiVersion: v1 +kind: Service +metadata: + name: cdn-service + labels: + app: cdn +spec: + ports: + - port: 443 + targetPort: 8443 + name: https + - port: 1935 + targetPort: 1935 + name: rtmp + externalIPs: + - defn(`HOSTIP') + selector: + app: cdn + +--- +') + apiVersion: apps/v1 kind: Deployment metadata: @@ -8,7 +31,7 @@ metadata: labels: app: cdn spec: - replicas: 1 + replicas: ifelse(defn(`SCENARIO'),`cdn',1,0) selector: matchLabels: app: cdn @@ -20,12 +43,10 @@ spec: enableServiceLinks: false containers: - name: cdn - image: defn(`REGISTRY_PREFIX')`ovc_'defn(`SCENARIO')_service:latest + image: defn(`REGISTRY_PREFIX')`tc_'defn(`SCENARIO')_service:latest imagePullPolicy: IfNotPresent ports: -ifelse(defn(`SCENARIO'),`cdn',`dnl - containerPort: 8443 -')dnl - containerPort: 1935 resources: limits: @@ -53,4 +74,3 @@ ifelse(defn(`SCENARIO'),`cdn',`dnl secret: secretName: self-signed-certificate PLATFORM_NODE_SELECTOR(`Xeon')dnl - diff --git a/deployment/kubernetes/yaml/kafka-svc.yaml.m4 b/deployment/kubernetes/yaml/kafka-svc.yaml.m4 deleted file mode 100644 index 6adfca0..0000000 --- a/deployment/kubernetes/yaml/kafka-svc.yaml.m4 +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: kafka-service - labels: - app: kafka -spec: - ports: - - port: 9092 - protocol: TCP - selector: - app: 
kafka diff --git a/deployment/kubernetes/yaml/kafka-deploy.yaml.m4 b/deployment/kubernetes/yaml/kafka.yaml.m4 similarity index 89% rename from deployment/kubernetes/yaml/kafka-deploy.yaml.m4 rename to deployment/kubernetes/yaml/kafka.yaml.m4 index a860bdb..4bc430c 100644 --- a/deployment/kubernetes/yaml/kafka-deploy.yaml.m4 +++ b/deployment/kubernetes/yaml/kafka.yaml.m4 @@ -1,6 +1,21 @@ include(platform.m4) include(configure.m4) +apiVersion: v1 +kind: Service +metadata: + name: kafka-service + labels: + app: kafka +spec: + ports: + - port: 9092 + protocol: TCP + selector: + app: kafka + +--- + apiVersion: apps/v1 kind: Deployment metadata: @@ -20,7 +35,7 @@ spec: enableServiceLinks: false containers: - name: kafka - image: defn(`REGISTRY_PREFIX')ovc_kafka_service:latest + image: defn(`REGISTRY_PREFIX')tc_kafka_service:latest imagePullPolicy: IfNotPresent ports: - containerPort: 9092 diff --git a/deployment/kubernetes/yaml/live-deploy.yaml.m4 b/deployment/kubernetes/yaml/live.yaml.m4 similarity index 93% rename from deployment/kubernetes/yaml/live-deploy.yaml.m4 rename to deployment/kubernetes/yaml/live.yaml.m4 index 5467646..1b257c8 100644 --- a/deployment/kubernetes/yaml/live-deploy.yaml.m4 +++ b/deployment/kubernetes/yaml/live.yaml.m4 @@ -11,7 +11,7 @@ metadata: labels: app: live-defn(`LIDX') spec: - replicas: eval(defn(`LIDX') Date: Tue, 12 Jan 2021 12:21:20 -0800 Subject: [PATCH 49/91] update docs --- README.md | 93 +- deployment/kubernetes/README.md | 39 + .../kubernetes/helm/cdn-transcode/README.md | 2 +- .../logging/elasticsearch/es-statefulSet.yaml | 109 - .../logging/elasticsearch/es-svc.yaml | 19 - .../logging/fluentd/fluentd-es-configMap.yaml | 449 - .../logging/fluentd/fluentd-es-ds.yaml | 96 - .../logging/kibana/kibana-configMap.yaml | 14 - .../logging/kibana/kibana-deploy.yaml | 59 - .../kubernetes/logging/kibana/kibana-svc.yaml | 19 - .../kubernetes/logging/start_logging.sh | 32 - deployment/kubernetes/logging/stop_logging.sh | 34 - 
.../alertmanager-configMapEmail.yaml | 30 - .../alertmanager-configMapRules.yaml | 56 - .../alertmanager/alertmanager-deploy.yaml | 74 - .../alertmanager/alertmanager-svc.yaml | 16 - ...grafana-configMapDashboardDefinitions.yaml | 11120 ---------------- .../grafana/grafana-configMapDashboards.yaml | 17 - .../grafana/grafana-configMapDataSources.yaml | 20 - .../monitoring/grafana/grafana-deploy.yaml | 120 - .../monitoring/grafana/grafana-svc.yaml | 18 - .../monitoring/heapster/heapster-deploy.yaml | 28 - .../monitoring/heapster/heapster-rbac.yaml | 36 - .../monitoring/heapster/heapster-svc.yaml | 23 - .../kube-state-metrics-deploy.yaml | 87 - .../kube-state-metrics-rbac.yaml | 103 - .../kube-state-metrics-svc.yaml | 23 - .../monitoring/namespace/namespace.yaml | 7 - .../node-export/node-exporter-deploy.yaml | 58 - .../node-export/node-exporter-svc.yaml | 16 - .../prometheus/prometheus-configMap.yaml | 224 - .../prometheus/prometheus-deploy.yaml | 80 - .../prometheus/prometheus-rbac.yaml | 39 - .../monitoring/prometheus/prometheus-svc.yaml | 18 - .../kubernetes/monitoring/start_monitoring.sh | 34 - .../kubernetes/monitoring/stop_monitoring.sh | 38 - doc/CDN-Transcode-Sample-Arch.vsdx | Bin 52491 -> 0 bytes ..._Transcode_Sample_Getting_Started_Guide.md | 121 - doc/CDN_Transcode_Sample_RA.md | 149 - doc/Remove_the_Kubernetes_environment.md | 9 - doc/Set_proxy_server.md | 66 - doc/Setup_Kubernetes_master_environment.md | 33 - ...up_Kubernetes_master_environment_CentOS.md | 37 - ...up_Kubernetes_master_environment_ubuntu.md | 32 - ...etup_Kubernetes_slaver_node_environment.md | 24 - doc/Setup_NFS_server_or_client.md | 16 - doc/Start_CDN_Transcode_Sample.md | 68 - doc/cmake.md | 34 + 48 files changed, 125 insertions(+), 13714 deletions(-) create mode 100644 deployment/kubernetes/README.md delete mode 100644 deployment/kubernetes/logging/elasticsearch/es-statefulSet.yaml delete mode 100644 deployment/kubernetes/logging/elasticsearch/es-svc.yaml delete mode 100644 
deployment/kubernetes/logging/fluentd/fluentd-es-configMap.yaml delete mode 100644 deployment/kubernetes/logging/fluentd/fluentd-es-ds.yaml delete mode 100644 deployment/kubernetes/logging/kibana/kibana-configMap.yaml delete mode 100644 deployment/kubernetes/logging/kibana/kibana-deploy.yaml delete mode 100644 deployment/kubernetes/logging/kibana/kibana-svc.yaml delete mode 100755 deployment/kubernetes/logging/start_logging.sh delete mode 100755 deployment/kubernetes/logging/stop_logging.sh delete mode 100644 deployment/kubernetes/monitoring/alertmanager/alertmanager-configMapEmail.yaml delete mode 100644 deployment/kubernetes/monitoring/alertmanager/alertmanager-configMapRules.yaml delete mode 100644 deployment/kubernetes/monitoring/alertmanager/alertmanager-deploy.yaml delete mode 100644 deployment/kubernetes/monitoring/alertmanager/alertmanager-svc.yaml delete mode 100644 deployment/kubernetes/monitoring/grafana/grafana-configMapDashboardDefinitions.yaml delete mode 100644 deployment/kubernetes/monitoring/grafana/grafana-configMapDashboards.yaml delete mode 100644 deployment/kubernetes/monitoring/grafana/grafana-configMapDataSources.yaml delete mode 100644 deployment/kubernetes/monitoring/grafana/grafana-deploy.yaml delete mode 100644 deployment/kubernetes/monitoring/grafana/grafana-svc.yaml delete mode 100644 deployment/kubernetes/monitoring/heapster/heapster-deploy.yaml delete mode 100644 deployment/kubernetes/monitoring/heapster/heapster-rbac.yaml delete mode 100644 deployment/kubernetes/monitoring/heapster/heapster-svc.yaml delete mode 100644 deployment/kubernetes/monitoring/kube-state-metrics/kube-state-metrics-deploy.yaml delete mode 100644 deployment/kubernetes/monitoring/kube-state-metrics/kube-state-metrics-rbac.yaml delete mode 100644 deployment/kubernetes/monitoring/kube-state-metrics/kube-state-metrics-svc.yaml delete mode 100644 deployment/kubernetes/monitoring/namespace/namespace.yaml delete mode 100644 
deployment/kubernetes/monitoring/node-export/node-exporter-deploy.yaml delete mode 100644 deployment/kubernetes/monitoring/node-export/node-exporter-svc.yaml delete mode 100644 deployment/kubernetes/monitoring/prometheus/prometheus-configMap.yaml delete mode 100644 deployment/kubernetes/monitoring/prometheus/prometheus-deploy.yaml delete mode 100644 deployment/kubernetes/monitoring/prometheus/prometheus-rbac.yaml delete mode 100644 deployment/kubernetes/monitoring/prometheus/prometheus-svc.yaml delete mode 100755 deployment/kubernetes/monitoring/start_monitoring.sh delete mode 100755 deployment/kubernetes/monitoring/stop_monitoring.sh delete mode 100644 doc/CDN-Transcode-Sample-Arch.vsdx delete mode 100644 doc/CDN_Transcode_Sample_Getting_Started_Guide.md delete mode 100644 doc/CDN_Transcode_Sample_RA.md delete mode 100644 doc/Remove_the_Kubernetes_environment.md delete mode 100644 doc/Set_proxy_server.md delete mode 100644 doc/Setup_Kubernetes_master_environment.md delete mode 100644 doc/Setup_Kubernetes_master_environment_CentOS.md delete mode 100644 doc/Setup_Kubernetes_master_environment_ubuntu.md delete mode 100644 doc/Setup_Kubernetes_slaver_node_environment.md delete mode 100644 doc/Setup_NFS_server_or_client.md delete mode 100644 doc/Start_CDN_Transcode_Sample.md create mode 100644 doc/cmake.md diff --git a/README.md b/README.md index 1eaa0d8..501ab09 100644 --- a/README.md +++ b/README.md @@ -1,46 +1,60 @@ -# Open Visual Cloud CDN Transcode Sample +### Open Visual Cloud CDN Transcode Sample [![Travis Build Status](https://travis-ci.com/OpenVisualCloud/CDN-Transcode-Sample.svg?branch=master)](https://travis-ci.com/OpenVisualCloud/CDN-Transcode-Sample) [![Stable release](https://img.shields.io/badge/latest_release-v1.0-green.svg)](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/releases/tag/v1.0) [![License](https://img.shields.io/badge/license-BSD_3_Clause-green.svg)](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/blob/master/LICENSE) 
[![Contributions](https://img.shields.io/badge/contributions-welcome-blue.svg)](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki) -The CDN Transcode Sample is an Open Visual Cloud software stack with all required open source ingredients well integrated to provide out-of-box CDN media transcode service, including live streaming and video on demand. It also provides Docker-based media delivery software development environment upon which developer can easily build their specific applications. +The CDN Transcode Sample is an Open Visual Cloud software stack with all required open source ingredients well integrated to provide out-of-box simple transcode or transcode+CDN service, including live streaming and video on demand. It also provides Docker-based media delivery software development environment upon which developer can easily build their specific applications. -# Architecture +### Architecture -The sample implements a reference server-side transcode system over CDN infrastructure, which features `live streaming` and `VOD`. +The sample implements a reference server-side transcode system over CDN infrastructure, which features `live streaming` and `VOD`. Among them, the `VOD` service can run independently to provide a simple transcode service. -# System requirements -## Operating system -The CDN Transcode Sample may run on Linux* 64 bit operating systems. The list below represents the operating systems that the transcode application and library were tested and validated on: -- Ubuntu* 18.04.2 Server LTS -- CentOS* 7.6 +### Software Stacks -# How to setup The CDN Transcode Sample +The sample is powered by the following Open Visual Cloud software stacks: + +- Media transcoding software stack: + +The FFmpeg-based media transcoding stack is used to transcode media content from a higher resolution/quality to a lower resolution/quality. The software stack is optimized for Intel Xeon Scalable Processors and Intel XeonE3 Scalable Processors. 
+ +### Install Prerequisites: + +- **Time Zone**: Check that the timezone setting of your host machine is correctly configured. Timezone is used during build. If you plan to run the sample on a cluster of machines, please make sure to synchronize time among the controller node and worker nodes. + +- **Build Tools**: Install `cmake`, `make`, `m4`, `wget` and `gawk` if they are not available on your system. + +- **Docker Engine**: + + - Install [docker engine](https://docs.docker.com/get-docker). Minimum version required: `17.05`. Make sure you setup docker to run as a regular user. + - Setup `Kubernetes`. See [Kubernetes Setup](deployment/kubernetes/README.md) for additional setup details. + - Setup docker proxy as follows if you are behind a firewall: -## Setup CDN environment -### Install the third-party dependency Libraries and tools ``` -sudo -E ./script/install_dependency.sh +sudo mkdir -p /etc/systemd/system/docker.service.d +printf "[Service]\nEnvironment=\"HTTPS_PROXY=$https_proxy\" \"NO_PROXY=$no_proxy\"\n" | sudo tee /etc/systemd/system/docker.service.d/proxy.conf +sudo systemctl daemon-reload +sudo systemctl restart docker ``` -### Setup docker proxy as follows if you are behind a firewall + +### Build the Sample + +Run the following command to run the sample as a simple transcoder: ``` -sudo mkdir -p /etc/systemd/system/docker.service.d -printf "[Service]\nEnvironment=\"HTTPS_PROXY=$https_proxy\" \"NO_PROXY=$no_proxy\"\n" | sudo tee /etc/systemd/system/docker.service.d/proxy.conf -sudo systemctl daemon-reload -sudo systemctl restart docker +mkdir build +cd build +cmake .. +make ``` -## Build -Run below commands to build docker images +Run the following command to run the sample as transcode+CDN: ``` -cd CDN-Transcode-Sample mkdir build cd build -cmake .. +cmake -DSCENARIO=cdn .. 
make ``` @@ -52,33 +66,28 @@ To deploy without a private registry, run `make update` after each build to push --- -## Deploy -### Auto deployment using Kubernetes +### Deploy the Sample -**Tips:** It divides into two parts: -- [Setup Kubernetes for CentOS](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-for-CentOS) -- [Setup Kubernetes for Ubuntu](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-for-Ubuntu) -- [Setup NFS environment](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-NFS-environment) +Start/stop the sample with Kubernetes [yaml configurations](deployment/kubernetes/yaml): -Start CDN transcode service ``` make volume make start_kubernetes +... +make stop_kubernetes ``` -**Tips:** [Configuration example for Kubernetes deploy](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Configuration-example-for-Kubernetes-deploy) -Stop CDN transcode service +Start/stop the sample with Kubernetes [Helm charts](deployment/kubernetes/helm): + ``` -make stop_kubernetes +make volume +make start_helm +... 
+make stop_helm ``` -____ -### See Also -- [Deploy on Xeon E5 using docker swarm](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Deploy-on-Xeon-E5-using-docker-swarm) -- [Deploy on Xeon E5 using Kubernetes](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Deploy-on-Xeon-E5-using-Kubernetes) -- [Deploy on Xeon E3 with Gen GFX using Kubernetes](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Deploy-on-Xeon-E3-with-Gen-GFX-using-Kubernetes) -- [Deploy on VCA2 with Gen GFX using Kubernetes](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Deploy-on-VCA2-with-Gen-GFX-using-Kubernetes) -- [Setup proxy server](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-proxy-server) -- [Setup Kubernetes Logging](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-logging-environment) -- [Setup Kubernetes Monitoring](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-monitoring-environment) -- [Remove Kubernetes environment](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Remove-Kubernetes-environment) +# See Also + +- [Kubernetes Setup](deployment/kubernetes/README.md) +- [Build Options](doc/cmake.md) + diff --git a/deployment/kubernetes/README.md b/deployment/kubernetes/README.md new file mode 100644 index 0000000..f52bb9b --- /dev/null +++ b/deployment/kubernetes/README.md @@ -0,0 +1,39 @@ + +The CDN-Transcode sample can be deployed with Kubernetes. + +### Kubernetes Setup + +- Follow the [instructions](https://kubernetes.io/docs/setup) to setup your Kubernetes cluster. + +- Optional: setup password-less access from the Kubernetes controller to each worker node (required by `make update`): + +``` +ssh-keygen +ssh-copy-id +``` + +- Start/stop services as follows: + +``` +mkdir build +cd build +cmake .. 
+make +make update # optional for private docker registry +make volume +make start_kubernetes +make stop_kubernetes +``` + +--- + +The command ```make update``` uploads the sample images to each worker node. If you prefer to use a private docker registry, configure the sample, `cmake -DREGISTRY= ..`, to push images to the private registry after each build. +- The `make volume` command creates local persistent volumes under the `/tmp` directory of the first two Kubernetes workers. This is a temporary solution for quick sample deployment. For scalability beyond a two-node cluster, consider rewriting the persistent volume scripts. + +--- + +### See Also + +- [Helm Charts](helm/cdn-transcode/README.md) +- [CMake Options](../../doc/cmake.md) + diff --git a/deployment/kubernetes/helm/cdn-transcode/README.md b/deployment/kubernetes/helm/cdn-transcode/README.md index cb75d74..2767794 100644 --- a/deployment/kubernetes/helm/cdn-transcode/README.md +++ b/deployment/kubernetes/helm/cdn-transcode/README.md @@ -1,5 +1,5 @@ -The CDN Transcode Sample is an Open Visual Cloud software stack with all required open source ingredients well integrated to provide out-of-box CDN media transcode service, including live streaming and video on demand. It also provides docker-based media delivery software development environment upon which developer can easily build their specific applications. +The CDN Transcode Sample is an Open Visual Cloud software stack with all required open source ingredients well integrated to provide out-of-box simple transcode or CDN media transcode service, including live streaming and video on demand. It also provides docker-based media delivery software development environment upon which developer can easily build their specific applications. 
### Prerequisites: diff --git a/deployment/kubernetes/logging/elasticsearch/es-statefulSet.yaml b/deployment/kubernetes/logging/elasticsearch/es-statefulSet.yaml deleted file mode 100644 index ba32a53..0000000 --- a/deployment/kubernetes/logging/elasticsearch/es-statefulSet.yaml +++ /dev/null @@ -1,109 +0,0 @@ -# RBAC authn and authz -apiVersion: v1 -kind: ServiceAccount -metadata: - name: elasticsearch-logging - namespace: kube-system - labels: - k8s-app: elasticsearch-logging - addonmanager.kubernetes.io/mode: Reconcile ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: elasticsearch-logging - labels: - k8s-app: elasticsearch-logging - addonmanager.kubernetes.io/mode: Reconcile -rules: -- apiGroups: - - "" - resources: - - "services" - - "namespaces" - - "endpoints" - verbs: - - "get" ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: kube-system - name: elasticsearch-logging - labels: - k8s-app: elasticsearch-logging - addonmanager.kubernetes.io/mode: Reconcile -subjects: -- kind: ServiceAccount - name: elasticsearch-logging - namespace: kube-system - apiGroup: "" -roleRef: - kind: ClusterRole - name: elasticsearch-logging - apiGroup: "" ---- -# Elasticsearch deployment itself -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: elasticsearch-logging - namespace: kube-system - labels: - k8s-app: elasticsearch-logging - addonmanager.kubernetes.io/mode: Reconcile -spec: - serviceName: elasticsearch-logging - replicas: 1 - selector: - matchLabels: - k8s-app: elasticsearch-logging - template: - metadata: - labels: - k8s-app: elasticsearch-logging - spec: - serviceAccountName: elasticsearch-logging - containers: - - image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0 - name: elasticsearch-logging - resources: - limits: - cpu: 1000m - memory: 3Gi - requests: - cpu: 100m - memory: 3Gi - ports: - - containerPort: 9200 - name: db - protocol: TCP - - containerPort: 9300 - name: 
transport - protocol: TCP - volumeMounts: - - name: elasticsearch-logging - mountPath: /data - env: - - name: node.name - value: node_name - - name: cluster.initial_master_nodes - value: node_name - - name: "NAMESPACE" - valueFrom: - fieldRef: - fieldPath: metadata.namespace - volumes: - - name: elasticsearch-logging - emptyDir: {} - nodeSelector: - node-role.kubernetes.io/master: "" - tolerations: - - key: "node-role.kubernetes.io/master" - effect: "NoSchedule" - initContainers: - - image: alpine:3.6 - command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"] - name: elasticsearch-logging-init - securityContext: - privileged: true diff --git a/deployment/kubernetes/logging/elasticsearch/es-svc.yaml b/deployment/kubernetes/logging/elasticsearch/es-svc.yaml deleted file mode 100644 index 7f3a2f6..0000000 --- a/deployment/kubernetes/logging/elasticsearch/es-svc.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: elasticsearch-logging - namespace: kube-system - labels: - k8s-app: elasticsearch-logging - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile - kubernetes.io/name: "Elasticsearch" -spec: - ports: - - port: 9200 - protocol: TCP - targetPort: 9200 - nodePort: 9200 - selector: - k8s-app: elasticsearch-logging - type: NodePort diff --git a/deployment/kubernetes/logging/fluentd/fluentd-es-configMap.yaml b/deployment/kubernetes/logging/fluentd/fluentd-es-configMap.yaml deleted file mode 100644 index 5d19f83..0000000 --- a/deployment/kubernetes/logging/fluentd/fluentd-es-configMap.yaml +++ /dev/null @@ -1,449 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: fluentd-es-config - namespace: kube-system - labels: - addonmanager.kubernetes.io/mode: Reconcile -data: - system.conf: |- - - root_dir /tmp/fluentd-buffers/ - - containers.input.conf: |- - # This configuration file for Fluentd / td-agent is used - # to watch changes to Docker log files. 
The kubelet creates symlinks that - # capture the pod name, namespace, container name & Docker container ID - # to the docker logs for pods in the /var/log/containers directory on the host. - # If running this fluentd configuration in a Docker container, the /var/log - # directory should be mounted in the container. - # - # These logs are then submitted to Elasticsearch which assumes the - # installation of the fluent-plugin-elasticsearch & the - # fluent-plugin-kubernetes_metadata_filter plugins. - # See https://github.com/uken/fluent-plugin-elasticsearch & - # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for - # more information about the plugins. - # - # Example - # ======= - # A line in the Docker log file might look like this JSON: - # - # {"log":"2014/09/25 21:15:03 Got request with path wombat\n", - # "stream":"stderr", - # "time":"2014-09-25T21:15:03.499185026Z"} - # - # The time_format specification below makes sure we properly - # parse the time format produced by Docker. This will be - # submitted to Elasticsearch and should appear like: - # $ curl 'http://elasticsearch-logging:9200/_search?pretty' - # ... - # { - # "_index" : "logstash-2014.09.25", - # "_type" : "fluentd", - # "_id" : "VBrbor2QTuGpsQyTCdfzqA", - # "_score" : 1.0, - # "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n", - # "stream":"stderr","tag":"docker.container.all", - # "@timestamp":"2014-09-25T22:45:50+00:00"} - # }, - # ... - # - # The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log - # record & add labels to the log record if properly configured. This enables users - # to filter & search logs on any metadata. 
- # For example a Docker container's logs might be in the directory: - # - # /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b - # - # and in the file: - # - # 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log - # - # where 997599971ee6... is the Docker ID of the running container. - # The Kubernetes kubelet makes a symbolic link to this file on the host machine - # in the /var/log/containers directory which includes the pod name and the Kubernetes - # container name: - # - # synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log - # -> - # /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log - # - # The /var/log directory on the host is mapped to the /var/log directory in the container - # running this instance of Fluentd and we end up collecting the file: - # - # /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log - # - # This results in the tag: - # - # var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log - # - # The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name - # which are added to the log message as a kubernetes field object & the Docker container ID - # is also added under the docker field object. 
- # The final tag is: - # - # kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log - # - # And the final log record look like: - # - # { - # "log":"2014/09/25 21:15:03 Got request with path wombat\n", - # "stream":"stderr", - # "time":"2014-09-25T21:15:03.499185026Z", - # "kubernetes": { - # "namespace": "default", - # "pod_name": "synthetic-logger-0.25lps-pod", - # "container_name": "synth-lgr" - # }, - # "docker": { - # "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b" - # } - # } - # - # This makes it easier for users to search for logs by pod name or by - # the name of the Kubernetes container regardless of how many times the - # Kubernetes pod has been restarted (resulting in a several Docker container IDs). - # Json Log Example: - # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"} - # CRI Log Example: - # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here - - @id fluentd-containers.log - @type tail - path /var/log/containers/*.log - pos_file /var/log/es-containers.log.pos - tag raw.kubernetes.* - read_from_head true - - @type multi_format - - format json - time_key time - time_format %Y-%m-%dT%H:%M:%S.%NZ - - - format /^(? - - - # Detect exceptions in the log output and forward them as one log entry. 
- - @id raw.kubernetes - @type detect_exceptions - remove_tag_prefix raw - message log - stream stream - multiline_flush_interval 5 - max_bytes 500000 - max_lines 1000 - - # Concatenate multi-line logs - - @id filter_concat - @type concat - key message - multiline_end_regexp /\n$/ - separator "" - - # Enriches records with Kubernetes metadata - - @id filter_kubernetes_metadata - @type kubernetes_metadata - - # Fixes json fields in Elasticsearch - - @id filter_parser - @type parser - key_name log - reserve_data true - remove_key_name_field true - - @type multi_format - - format json - - - format none - - - - system.input.conf: |- - # Example: - # 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081 - - @id minion - @type tail - format /^(?