diff --git a/.gitignore b/.gitignore
index 882027a..1407116 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,7 @@ volume/video/*
deployment/docker-swarm/dhparam.pem
deployment/docker-swarm/self.crt
deployment/docker-swarm/self.key
+deployment/kubernetes/*.yaml
+deployment/kubernetes/yaml/*.yaml
+deployment/certificate/self.crt
+deployment/certificate/self.key
diff --git a/ATTRIBUTION.md b/ATTRIBUTION.md
new file mode 100644
index 0000000..47b38cc
--- /dev/null
+++ b/ATTRIBUTION.md
@@ -0,0 +1,51 @@
+Component: Apache python3-kazoo
+Repository: https://github.com/python-zk/kazoo
+License: Apache License 2.0 (https://github.com/python-zk/kazoo/blob/master/LICENSE)
+
+Component: BeautifulSoup4
+Repository: https://github.com/il-vladislav/BeautifulSoup4/
+License: MIT (https://pypi.org/project/beautifulsoup4/)
+
+Component: confluentinc/cp-docker-images
+Repository: https://github.com/confluentinc/cp-docker-images
+License: Apache License 2.0 (https://github.com/confluentinc/cp-docker-images/blob/5.3.3-post/LICENSE)
+
+Component: dashjs-tmp-nicky
+Repository: https://github.com/Dash-Industry-Forum/dash.js/
+License: BSD 3-clause "New" or "Revised" License (https://github.com/Dash-Industry-Forum/dash.js/blob/development/LICENSE.md)
+
+Component: jQuery (New Wave JavaScript)
+Repository: https://github.com/jquery/jquery
+License: MIT (https://github.com/jquery/jquery/blob/master/LICENSE.txt)
+
+Component: kafka-python
+Repository: https://github.com/dpkp/kafka-python/
+License: MIT (https://github.com/dpkp/kafka-python/blob/master/LICENSE)
+
+Component: What Input
+Repository: https://github.com/ten1seven/what-input
+License: MIT (https://github.com/ten1seven/what-input/blob/master/LICENSE)
+
+Component: Tornado Tornado Web Server
+Repository: https://github.com/tornadoweb/tornado
+License: Apache License 2.0 (https://github.com/tornadoweb/tornado/blob/master/LICENSE)
+
+Component: video-dev hls.js
+Repository: https://github.com/video-dev/hls.js
+License: Apache License 2.0 (https://github.com/video-dev/hls.js/blob/master/LICENSE)
+
+Component: Foundation for Sites
+Repository: https://github.com/zurb/foundation-sites-6
+License: MIT (https://github.com/zurb/foundation-sites-6/blob/develop/LICENSE)
+
+Component: Netflix/vmaf
+Repository: https://github.com/Netflix/vmaf
+License: BSD-2-Clause-Patent (https://github.com/Netflix/vmaf/blob/master/LICENSE)
+
+Component: Jquery-Magnific-Popup
+Repository: https://github.com/dimsemenov/Magnific-Popup
+License: MIT (https://github.com/dimsemenov/Magnific-Popup/blob/master/LICENSE)
+
+Component: bootstrap.min.js
+Repository: https://github.com/twbs/bootstrap
+License: MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3765883..5ed6841 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,6 +1,30 @@
cmake_minimum_required (VERSION 2.8)
-Project(OVC NONE)
+project(CDNTranscodeSample NONE)
+
+if (NOT DEFINED REGISTRY)
+ set(REGISTRY "")
+elseif (NOT "${REGISTRY}" MATCHES "/$")
+ set(REGISTRY "${REGISTRY}/")
+endif()
+if (NOT DEFINED NVODS)
+ set(NVODS "2")
+endif()
+if (NOT DEFINED NLIVES)
+ set(NLIVES "1")
+endif()
+
+if (NOT DEFINED PLATFORM)
+ set(PLATFORM "Xeon")
+endif()
+
+if (NOT DEFINED SCENARIO)
+ set(SCENARIO "transcode")
+endif()
+
+if (NOT (SCENARIO STREQUAL "cdn"))
+ set(NLIVES "0")
+endif()
file(GLOB dirs "deployment" "*")
list(REMOVE_DUPLICATES dirs)
@@ -12,3 +36,6 @@ endforeach()
# legal message
execute_process(COMMAND printf "\nThis script will build third party components licensed under various open source licenses into your container images. The terms under which those components may be used and distributed can be found with the license document that is provided with those components. Please familiarize yourself with those terms to ensure your distribution of those components complies with the terms of those licenses.\n\n")
+execute_process(COMMAND printf "\n-- Setting: PLATFORM=${PLATFORM} SCENARIO=${SCENARIO}\n")
+execute_process(COMMAND printf "-- Setting: NVODS=${NVODS}, NLIVES=${NLIVES}\n")
+execute_process(COMMAND printf "-- Setting: REGISTRY=${REGISTRY}\n")
diff --git a/README.md b/README.md
index f989ff2..f3ce0f4 100644
--- a/README.md
+++ b/README.md
@@ -1,108 +1,108 @@
-# Open Visual Cloud CDN Transcode Sample
+# PROJECT NOT UNDER ACTIVE MANAGEMENT #
+This project will no longer be maintained by Intel.
+Intel has ceased development and contributions including, but not limited to, maintenance, bug fixes, new releases, or updates, to this project.
+Intel no longer accepts patches to this project.
+If you have an ongoing need to use this project, are interested in independently developing it, or would like to maintain patches for the open source software community, please create your own fork of this project.
+
+
+
+### Open Visual Cloud CDN Transcode Sample
[](https://travis-ci.com/OpenVisualCloud/CDN-Transcode-Sample)
[](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/releases/tag/v1.0)
[](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/blob/master/LICENSE)
[](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki)
-Table of Contents
-=================
- * [Open Visual Cloud CDN Transcode Sample](#open-visual-cloud-cdn-transcode-sample)
- * [Architecture](#architecture)
- * [What's in this project](#whats-in-this-project)
- * [System requirements](#system-requirements)
- * [Operating system](#operating-system)
- * [How to setup The CDN Transcode Sample](#how-to-setup-the-cdn-transcode-sample)
- * [Setup the CDN Transcode Sample OS environment(Both of master and slave nodes)](#setup-the-cdn-transcode-sample-os-environmentboth-of-master-and-slave-nodes)
- * [Install ubuntu18.04.2/CentOS 7.6](#install-ubuntu18042centos-76)
- * [Setup CDN environment(Both of master and slave nodes)](#setup-cdn-environmentboth-of-master-and-slave-nodes)
- * [Install the third-party dependency Libraries and tools](#install-the-third-party-dependency-libraries-and-tools)
- * [Setup docker proxy as follows if you are behind a firewall](#setup-docker-proxy-as-follows-if-you-are-behind-a-firewall)
- * [Build(Both of master and slave nodes)](#buildboth-of-master-and-slave-nodes)
- * [Deploy](#deploy)
- * [Auto deployment using Kubernetes](#auto-deployment-using-kubernetes)
- * [See Also](#see-also)
-
-The CDN Transcode Sample is an Open Visual Cloud software stack with all required open source ingredients well integrated to provide out-of-box CDN media transcode service, including live streaming and video on demand. It also provides Docker-based media delivery software development environment upon which developer can easily build their specific applications.
-
-# Architecture
-
-The sample implements a reference server-side transcode system over CDN infrastructure, which features `live streaming` and `VOD`.
-
-
-
-# What's in this project
-The CDN Transcode Sample contains below components:
-- Dockerfiles
-- Python web services source code
-- BASH glue scripts
-- HTML web pages
-- CMakefiles
-- Configuration files
-- Documents
-
-# System requirements
-## Operating system
-The CDN Transcode Sample may run on Linux* 64 bit operating systems. The list below represents the operating systems that the transcode application and library were tested and validated on:
-- Ubuntu* 18.04.2 Server LTS
-- CentOS* 7.6
-
-# How to setup The CDN Transcode Sample
-## Setup the CDN Transcode Sample OS environment(Both of master and slave nodes)
-Install Ubuntu 18.04.2/CentOS 7.6 on CDN-Transcode Server, and configure the IP address & proxy properly.
-### Install ubuntu18.04.2/CentOS 7.6
-- [Download Ubuntu and Install](https://ubuntu.com/download)
-- [Download CentOS and install](https://www.centos.org/download/)
-
-## Setup CDN environment(Both of master and slave nodes)
-### Install the third-party dependency Libraries and tools
+The CDN Transcode Sample is an Open Visual Cloud software stack with all required open source ingredients well integrated to provide out-of-box simple transcode or transcode+CDN service, including live streaming and video on demand. It also provides Docker-based media delivery software development environment upon which developer can easily build their specific applications.
+
+### Architecture
+
+The sample implements a reference server-side transcode system over CDN infrastructure, which features `live streaming` and `VOD`. Among them, the `VOD` service can run independently to provide a simple transcode service.
+
+
+
+### Software Stacks
+
+The sample is powered by the following Open Visual Cloud software stacks:
+
+- Media transcoding software stack:
+
+The FFmpeg-based media transcoding stack is used to transcode media content from a higher resolution/quality to a lower resolution/quality. The software stack is optimized for Intel Xeon Scalable Processors and Intel Xeon E3 Processors.
+
+- Media streaming and Web Hosting software stack:
+
+The NGINX-based software stack is used to host web services, video content and provide video streaming services. The software stack is optimized for Intel Xeon Scalable Processors.
+
+### Install Prerequisites:
+
+- **Time Zone**: Check that the timezone setting of your host machine is correctly configured. Timezone is used during build. If you plan to run the sample on a cluster of machines, please make sure to synchronize time among the controller node and worker nodes.
+
+- **Build Tools**: Install `cmake`, `make`, `m4`, `wget` and `gawk` if they are not available on your system.
+
+- **Docker Engine**:
+
+ - Install [docker engine](https://docs.docker.com/get-docker). Minimum version required: `17.05`. Make sure you setup docker to run as a regular user.
+ - Setup `Kubernetes`. See [Kubernetes Setup](deployment/kubernetes/README.md) for additional setup details.
+ - Setup docker proxy as follows if you are behind a firewall:
+
```
-sudo -E ./script/install_dependency.sh
+sudo mkdir -p /etc/systemd/system/docker.service.d
+printf "[Service]\nEnvironment=\"HTTPS_PROXY=$https_proxy\" \"NO_PROXY=$no_proxy\"\n" | sudo tee /etc/systemd/system/docker.service.d/proxy.conf
+sudo systemctl daemon-reload
+sudo systemctl restart docker
```
-### Setup docker proxy as follows if you are behind a firewall
+
+### Build the Sample
+
+Run the following command to run the sample as a simple transcoder:
```
-sudo mkdir -p /etc/systemd/system/docker.service.d
-printf "[Service]\nEnvironment=\"HTTPS_PROXY=$https_proxy\" \"NO_PROXY=$no_proxy\"\n" | sudo tee /etc/systemd/system/docker.service.d/proxy.conf
-sudo systemctl daemon-reload
-sudo systemctl restart docker
+mkdir build
+cd build
+cmake ..
+make
```
-## Build(Both of master and slave nodes)
-Run below commands to build docker images
+
+Run the following command to run the sample as transcode+CDN:
```
-cd CDN-Transcode-Sample
mkdir build
cd build
-cmake ..
+cmake -DSCENARIO=cdn ..
make
```
-## Deploy
-### Auto deployment using Kubernetes
+---
+
+If you deploy the sample to a cluster, configure the sample with `cmake -DREGISTRY=<registry-url> ..` so that the sample images are pushed to your private docker registry after each build.
+
+To deploy without a private registry, run `make update` after each build to push the sample images to the cluster nodes (which requires passwordless access from the master node to the worker nodes.)
-**Tips:** It divides into two parts: master or slave ones
-- [Setup Kubernetes master environment for CentOS](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-master-environment-for-CentOS)
-- [Setup Kubernetes master environment for Ubuntu](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-master-environment-for-Ubuntu)
-- [Setup Kubernetes slave environment](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-slave-environment)
-- [Setup NFS environment](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-NFS-environment)
+---
+
+### Deploy the Sample
+
+Start/stop the sample with Kubernetes [yaml configurations](deployment/kubernetes/yaml):
-Start CDN transcode service
```
+make volume
make start_kubernetes
+...
+make stop_kubernetes
```
-**Tips:** [Configuration example for Kubernetes deploy](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Configuration-example-for-Kubernetes-deploy)
-Stop CDN transcode service
+Start/stop the sample with Kubernetes [Helm charts](deployment/kubernetes/helm):
+
```
-make stop_kubernetes
+make volume
+make start_helm
+...
+make stop_helm
```
-____
-### See Also
-- [Deploy on Xeon E5 using docker swarm](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Deploy-on-Xeon-E5-using-docker-swarm)
-- [Deploy on Xeon E5 using Kubernetes](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Deploy-on-Xeon-E5-using-Kubernetes)
-- [Deploy on Xeon E3 with Gen GFX using Kubernetes](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Deploy-on-Xeon-E3-with-Gen-GFX-using-Kubernetes)
-- [Deploy on VCA2 with Gen GFX using Kubernetes](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Deploy-on-VCA2-with-Gen-GFX-using-Kubernetes)
-- [Setup proxy server](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-proxy-server)
-- [Setup Kubernetes Logging](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-logging-environment)
-- [Setup Kubernetes Monitoring](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Setup-Kubernetes-monitoring-environment)
-- [Remove Kubernetes environment](https://github.com/OpenVisualCloud/CDN-Transcode-Sample/wiki/Remove-Kubernetes-environment)
+For the `transcode` scenario, look at the logs of the `benchmark` pod for the batch transcoding summary. For the `cdn` scenario, point your browser to `https://<cdn-server-hostname>` to watch the list of video clips via `DASH` or `HLS`.
+
### See Also
+
+- [Kubernetes Setup](deployment/kubernetes/README.md)
+- [Build Options](doc/cmake.md)
+- [Reference Architecture](https://networkbuilders.intel.com/solutionslibrary/container-bare-metal-for-2nd-generation-intel-xeon-scalable-processor)
+
diff --git a/benchmark/.dockerignore b/benchmark/.dockerignore
new file mode 100644
index 0000000..596bd5f
--- /dev/null
+++ b/benchmark/.dockerignore
@@ -0,0 +1,3 @@
+CMakeLists.txt
+*.m4
+test/*
diff --git a/benchmark/CMakeLists.txt b/benchmark/CMakeLists.txt
new file mode 100644
index 0000000..b291929
--- /dev/null
+++ b/benchmark/CMakeLists.txt
@@ -0,0 +1,5 @@
+if (SCENARIO STREQUAL "transcode")
+ set(service "tc_benchmark_service")
+ include("${CMAKE_SOURCE_DIR}/script/service.cmake")
+ add_dependencies(build_${service} build_tc_common)
+endif()
diff --git a/benchmark/Dockerfile b/benchmark/Dockerfile
new file mode 100644
index 0000000..b7007d4
--- /dev/null
+++ b/benchmark/Dockerfile
@@ -0,0 +1,15 @@
+
+FROM tc_common
+
+RUN apt-get update -q && apt-get install -y -q python3-kafka python3-kazoo && rm -rf /var/lib/apt/lists/*
+
+COPY *.py /home/
+COPY *.json /home/
+CMD ["/bin/bash","-c","/home/main.py"]
+WORKDIR /home
+
+####
+ARG UID
+RUN mkdir -p /var/www/archive
+USER ${UID}
+####
diff --git a/benchmark/build.sh b/benchmark/build.sh
new file mode 100755
index 0000000..bd6a09d
--- /dev/null
+++ b/benchmark/build.sh
@@ -0,0 +1,6 @@
+#!/bin/bash -e
+
+IMAGE="tc_benchmark_service"
+DIR=$(dirname $(readlink -f "$0"))
+
+. "${DIR}/../script/build.sh"
diff --git a/benchmark/main.py b/benchmark/main.py
new file mode 100755
index 0000000..cdf94b1
--- /dev/null
+++ b/benchmark/main.py
@@ -0,0 +1,118 @@
+#!/usr/bin/python3
+
+from messaging import Producer, Consumer
+from os import listdir, walk
+from os.path import getsize
+import time
+import json
+import psutil
+import re
+
+KAFKA_TOPIC = "content_provider_sched"
+KAFKA_WORKLOAD_TOPIC = "transcoding"
+
+ARCHIVE_ROOT = "/var/www/archive"
+TARGET_ROOT = "/var/www/video"
+log_file = TARGET_ROOT+"/log.txt"
+
+config_file="/home/transcoding.json"
+
+streams = [s for s in listdir(ARCHIVE_ROOT) if s.endswith((".mp4", ".avi"))]
+
+jobs=[]
+with open(config_file,"rt") as fd:
+ jobs=json.load(fd)
+
+print("Submit jobs:", flush=True)
+# ingest jobs to start transcoding
+producer = Producer()
+idx=0
+for msg in jobs:
+ # schedule producing the stream
+ name_pattern=msg["name"]
+ for stream1 in streams:
+ if re.search(name_pattern, stream1):
+ msg.update({"idx": str(idx), "name": stream1})
+ print(msg,flush=True)
+ idx=idx+1
+
+ while True:
+ try:
+ producer.send(KAFKA_TOPIC, json.dumps(msg))
+ break
+ except Exception as e:
+ print("Exception: {}".format(e))
+ time.sleep(5)
+
+# show transcoding statistics
+
+def stats_fileinfo(root):
+ nfiles=0
+ size=0
+ for path, dirs, files in walk(root):
+ for stream1 in files:
+ if stream1.endswith((".mp4", ".avi", ".ts")):
+ nfiles=nfiles+1
+ size=size+getsize(path+"/"+stream1)
+ return (nfiles, size)
+
+c = Consumer(None)
+
+info={
+ "summary":{
+ "cpu": round(psutil.cpu_percent(),2),
+ "mem": round(int(psutil.virtual_memory().total - psutil.virtual_memory().free) / float(psutil.virtual_memory().total), 2),
+ "active":0,
+ "completed":0,
+ "aborted":0
+ },
+ "active_task":[],
+ "completed_task":[],
+ "aborted_task":[]
+ }
+
+def status_check(item, status):
+ return isinstance(item,dict) and "status" in item and item["status"] == status
+
+def process_message(msg,sinfo):
+ msg=json.loads(msg)
+ sinfo.update({msg["id"]:msg})
+ active=[ item["id"] for k,item in sinfo.items() if status_check(item, "active")]
+ completed=[ item["id"] for k,item in sinfo.items() if status_check(item, "completed")]
+ aborted=[ item["id"] for k,item in sinfo.items() if status_check(item, "aborted")]
+ sinfo.update({
+ "summary":{
+ "cpu": round(psutil.cpu_percent(),2),
+ "mem": round(int(psutil.virtual_memory().total - psutil.virtual_memory().free) / float(psutil.virtual_memory().total), 2),
+ "active": len(active),
+ "completed": len(completed),
+ "aborted": len(aborted)
+ },
+ "active_task":active,
+ "completed_task":completed,
+ "aborted_task":aborted
+ })
+ return active,completed,aborted
+
+def log_info(sinfo):
+ with open(log_file, "w") as f:
+ for k,v in sinfo.items():
+ f.write(str(k)+": "+json.dumps(v))
+ f.write("\n")
+
+def format_info(sinfo,task_list):
+ print("\n", flush=True)
+ for k,v in sinfo.items():
+ if k in task_list + ["summary","active_task","completed_task","aborted_task"]:
+ print(k,v, flush=True)
+
+while True:
+ try:
+ print("Waiting...",flush=True)
+ for message in c.messages(KAFKA_WORKLOAD_TOPIC):
+ active,completed,aborted = process_message(message,info)
+ log_info(info)
+ format_info(info,active)
+ except Exception as e:
+ print("Exception: {}".format(e))
+ time.sleep(2)
diff --git a/benchmark/shell.sh b/benchmark/shell.sh
new file mode 100755
index 0000000..e5a5ddd
--- /dev/null
+++ b/benchmark/shell.sh
@@ -0,0 +1,7 @@
+#!/bin/bash -e
+
+IMAGE="tc_benchmark_service"
+DIR=$(dirname $(readlink -f "$0"))
+OPTIONS=("--volume=${DIR}/../../volume/video/archive:/var/www/archive:ro" "--volume=${DIR}/../../volume/video/dash:/var/www/dash:ro" "--volume=${DIR}/../../volume/video/hls:/var/www/hls:ro")
+
+. "${DIR}/../script/shell.sh"
diff --git a/benchmark/transcoding.json b/benchmark/transcoding.json
new file mode 100644
index 0000000..a07e923
--- /dev/null
+++ b/benchmark/transcoding.json
@@ -0,0 +1,153 @@
+[{
+ "name": ".mp4$",
+ "parameters": {
+ "renditions":[[1920, 1080, 3280000, 192000]],
+ "codec_type":"HEVC",
+ "gop_size": "100",
+ "framerate": "30",
+ "bframe": "2",
+ "preset": "9",
+ "profile": "main",
+ "refs": "2",
+ "forced_idr": "1"
+ },
+ "loop": 0,
+ "output": {
+ "target": "file",
+ "type": "mp4"
+ },
+ "live_vod": "live"
+},{
+ "name": ".mp4$",
+ "parameters": {
+ "renditions":[[1920, 1080, 4100000, 192000]],
+ "codec_type":"AVC",
+ "gop_size": "100",
+ "framerate": "30",
+ "bframe": "2",
+ "preset": "veryfast",
+ "profile": "main",
+ "refs": "2",
+ "forced_idr": "1"
+ },
+ "loop": 0,
+ "output": {
+ "target": "file",
+ "type": "mp4"
+ },
+ "live_vod": "live"
+},{
+ "name": ".mp4$",
+ "parameters": {
+ "renditions":[[1280, 720, 1680000, 192000]],
+ "codec_type":"HEVC",
+ "gop_size": "100",
+ "framerate": "30",
+ "bframe": "2",
+ "preset": "9",
+ "profile": "main",
+ "refs": "2",
+ "forced_idr": "1"
+ },
+ "loop": 0,
+ "output": {
+ "target": "file",
+ "type": "mp4"
+ },
+ "live_vod": "live"
+},{
+ "name": ".mp4$",
+ "parameters": {
+ "renditions":[[1280, 720, 2100000, 192000]],
+ "codec_type":"AVC",
+ "gop_size": "100",
+ "framerate": "30",
+ "bframe": "2",
+ "preset": "veryfast",
+ "profile": "main",
+ "refs": "2",
+ "forced_idr": "1"
+ },
+ "loop": 0,
+ "output": {
+ "target": "file",
+ "type": "mp4"
+ },
+ "live_vod": "live"
+},{
+ "name": ".mp4$",
+ "parameters": {
+ "renditions":[[640, 480, 960000, 128000]],
+ "codec_type":"HEVC",
+ "gop_size": "100",
+ "framerate": "30",
+ "bframe": "2",
+ "preset": "9",
+ "profile": "main",
+ "refs": "2",
+ "forced_idr": "1"
+ },
+ "loop": 0,
+ "output": {
+ "target": "file",
+ "type": "mp4"
+ },
+ "live_vod": "live"
+},{
+ "name": ".mp4$",
+ "parameters": {
+ "renditions":[[640, 480, 1200000, 128000]],
+ "codec_type":"AVC",
+ "gop_size": "100",
+ "framerate": "30",
+ "bframe": "2",
+ "preset": "veryfast",
+ "profile": "main",
+ "refs": "2",
+ "forced_idr": "1"
+ },
+ "loop": 0,
+ "output": {
+ "target": "file",
+ "type": "mp4"
+ },
+ "live_vod": "live"
+},{
+ "name": ".mp4$",
+ "parameters": {
+ "renditions":[[480, 360, 440000, 128000]],
+ "codec_type":"HEVC",
+ "gop_size": "100",
+ "framerate": "30",
+ "bframe": "2",
+ "preset": "9",
+ "profile": "main",
+ "refs": "2",
+ "forced_idr": "1"
+ },
+ "loop": 0,
+ "output": {
+ "target": "file",
+ "type": "mp4"
+ },
+ "live_vod": "live"
+},{
+ "name": ".mp4$",
+ "parameters": {
+ "renditions":[[480, 360, 550000, 128000]],
+ "codec_type":"AVC",
+ "gop_size": "100",
+ "framerate": "30",
+ "bframe": "2",
+ "preset": "veryfast",
+ "profile": "main",
+ "refs": "2",
+ "forced_idr": "1"
+ },
+ "loop": 0,
+ "output": {
+ "target": "file",
+ "type": "mp4"
+ },
+ "live_vod": "live"
+}]
diff --git a/cdn-server/CMakeLists.txt b/cdn-server/CMakeLists.txt
index 0fd8b9e..f9d93fb 100644
--- a/cdn-server/CMakeLists.txt
+++ b/cdn-server/CMakeLists.txt
@@ -1,2 +1,4 @@
-set(service "ovc_cdn_service")
-include("${CMAKE_SOURCE_DIR}/script/service.cmake")
+if (SCENARIO STREQUAL "cdn")
+ set(service "tc_cdn_service")
+ include("${CMAKE_SOURCE_DIR}/script/service.cmake")
+endif()
diff --git a/cdn-server/Dockerfile b/cdn-server/Dockerfile
index 1470cc3..eccaa12 100644
--- a/cdn-server/Dockerfile
+++ b/cdn-server/Dockerfile
@@ -1,11 +1,23 @@
-FROM openvisualcloud/xeon-ubuntu1804-media-nginx:20.3.1
+FROM openvisualcloud/xeon-ubuntu1804-media-nginx:21.3
-Run DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends python3-setuptools python3-redis python-celery-common python3-tornado python3-kafka python3-kazoo vim openssh-server && rm -rf /var/lib/apt/lists/*
+RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends python3-setuptools python3-redis python-celery-common python3-tornado python3-kafka python3-kazoo openssh-server && rm -rf /var/lib/apt/lists/*
COPY *.xsl /etc/nginx/
COPY *.conf /etc/nginx/
+COPY html /var/www/html
COPY *.py /home/
CMD ["/bin/bash","-c","/home/main.py&/usr/local/sbin/nginx"]
WORKDIR /home
-EXPOSE 8080
+
+####
+ARG UID
+ARG GID
+## must use ; here to ignore user exist status code
+RUN [ ${GID} -gt 0 ] && groupadd -f -g ${GID} docker; \
+ [ ${UID} -gt 0 ] && useradd -d /home/docker -g ${GID} -K UID_MAX=${UID} -K UID_MIN=${UID} docker; \
+ touch /var/run/nginx.pid && \
+ mkdir -p /var/log/nginx /var/lib/nginx /var/www/video /var/www/archive && \
+ chown -R ${UID}:${GID} /var/run/nginx.pid /var/www /var/log/nginx /var/lib/nginx
+USER ${UID}
+####
diff --git a/cdn-server/build.sh b/cdn-server/build.sh
index edf72d2..f3b188d 100755
--- a/cdn-server/build.sh
+++ b/cdn-server/build.sh
@@ -1,6 +1,6 @@
#!/bin/bash -e
-IMAGE="ovc_cdn_service"
+IMAGE="tc_cdn_service"
DIR=$(dirname $(readlink -f "$0"))
. "${DIR}/../script/build.sh"
diff --git a/volume/html/css/app.css b/cdn-server/html/css/app.css
similarity index 100%
rename from volume/html/css/app.css
rename to cdn-server/html/css/app.css
diff --git a/volume/html/css/foundation.css b/cdn-server/html/css/foundation.css
similarity index 100%
rename from volume/html/css/foundation.css
rename to cdn-server/html/css/foundation.css
diff --git a/volume/html/css/foundation.min.css b/cdn-server/html/css/foundation.min.css
similarity index 100%
rename from volume/html/css/foundation.min.css
rename to cdn-server/html/css/foundation.min.css
diff --git a/volume/html/favicon.ico b/cdn-server/html/favicon.ico
similarity index 100%
rename from volume/html/favicon.ico
rename to cdn-server/html/favicon.ico
diff --git a/volume/html/header.shtml b/cdn-server/html/header.shtml
similarity index 100%
rename from volume/html/header.shtml
rename to cdn-server/html/header.shtml
diff --git a/volume/html/icon/foundation-icons.css b/cdn-server/html/icon/foundation-icons.css
similarity index 100%
rename from volume/html/icon/foundation-icons.css
rename to cdn-server/html/icon/foundation-icons.css
diff --git a/volume/html/icon/foundation-icons.eot b/cdn-server/html/icon/foundation-icons.eot
similarity index 100%
rename from volume/html/icon/foundation-icons.eot
rename to cdn-server/html/icon/foundation-icons.eot
diff --git a/volume/html/icon/foundation-icons.svg b/cdn-server/html/icon/foundation-icons.svg
similarity index 100%
rename from volume/html/icon/foundation-icons.svg
rename to cdn-server/html/icon/foundation-icons.svg
diff --git a/volume/html/icon/foundation-icons.ttf b/cdn-server/html/icon/foundation-icons.ttf
similarity index 100%
rename from volume/html/icon/foundation-icons.ttf
rename to cdn-server/html/icon/foundation-icons.ttf
diff --git a/volume/html/icon/foundation-icons.woff b/cdn-server/html/icon/foundation-icons.woff
similarity index 100%
rename from volume/html/icon/foundation-icons.woff
rename to cdn-server/html/icon/foundation-icons.woff
diff --git a/volume/html/image/grafana/Kubernetes-Monitoring-Arch.png b/cdn-server/html/image/grafana/Kubernetes-Monitoring-Arch.png
similarity index 100%
rename from volume/html/image/grafana/Kubernetes-Monitoring-Arch.png
rename to cdn-server/html/image/grafana/Kubernetes-Monitoring-Arch.png
diff --git a/volume/html/image/grafana/alertmanager.png b/cdn-server/html/image/grafana/alertmanager.png
similarity index 100%
rename from volume/html/image/grafana/alertmanager.png
rename to cdn-server/html/image/grafana/alertmanager.png
diff --git a/volume/html/image/grafana/dashboards_list.png b/cdn-server/html/image/grafana/dashboards_list.png
similarity index 100%
rename from volume/html/image/grafana/dashboards_list.png
rename to cdn-server/html/image/grafana/dashboards_list.png
diff --git a/volume/html/image/grafana/home.png b/cdn-server/html/image/grafana/home.png
similarity index 100%
rename from volume/html/image/grafana/home.png
rename to cdn-server/html/image/grafana/home.png
diff --git a/volume/html/image/grafana/login.png b/cdn-server/html/image/grafana/login.png
similarity index 100%
rename from volume/html/image/grafana/login.png
rename to cdn-server/html/image/grafana/login.png
diff --git a/volume/html/image/grafana/navigation.png b/cdn-server/html/image/grafana/navigation.png
similarity index 100%
rename from volume/html/image/grafana/navigation.png
rename to cdn-server/html/image/grafana/navigation.png
diff --git a/volume/html/image/grafana/nodes_dashboard.png b/cdn-server/html/image/grafana/nodes_dashboard.png
similarity index 100%
rename from volume/html/image/grafana/nodes_dashboard.png
rename to cdn-server/html/image/grafana/nodes_dashboard.png
diff --git a/volume/html/image/grafana/promQL.png b/cdn-server/html/image/grafana/promQL.png
similarity index 100%
rename from volume/html/image/grafana/promQL.png
rename to cdn-server/html/image/grafana/promQL.png
diff --git a/volume/html/image/grafana/prometheus.png b/cdn-server/html/image/grafana/prometheus.png
similarity index 100%
rename from volume/html/image/grafana/prometheus.png
rename to cdn-server/html/image/grafana/prometheus.png
diff --git a/volume/html/image/kibana/Kubernetes-Logging-Arch.png b/cdn-server/html/image/kibana/Kubernetes-Logging-Arch.png
similarity index 100%
rename from volume/html/image/kibana/Kubernetes-Logging-Arch.png
rename to cdn-server/html/image/kibana/Kubernetes-Logging-Arch.png
diff --git a/volume/html/image/kibana/Kubernetes-Logging-Arch.vsdx b/cdn-server/html/image/kibana/Kubernetes-Logging-Arch.vsdx
similarity index 100%
rename from volume/html/image/kibana/Kubernetes-Logging-Arch.vsdx
rename to cdn-server/html/image/kibana/Kubernetes-Logging-Arch.vsdx
diff --git a/volume/html/image/kibana/choose_source.png b/cdn-server/html/image/kibana/choose_source.png
similarity index 100%
rename from volume/html/image/kibana/choose_source.png
rename to cdn-server/html/image/kibana/choose_source.png
diff --git a/volume/html/image/kibana/ffmpeg_log.png b/cdn-server/html/image/kibana/ffmpeg_log.png
similarity index 100%
rename from volume/html/image/kibana/ffmpeg_log.png
rename to cdn-server/html/image/kibana/ffmpeg_log.png
diff --git a/volume/html/image/kibana/filter_log.png b/cdn-server/html/image/kibana/filter_log.png
similarity index 100%
rename from volume/html/image/kibana/filter_log.png
rename to cdn-server/html/image/kibana/filter_log.png
diff --git a/volume/html/image/kibana/generate_report.png b/cdn-server/html/image/kibana/generate_report.png
similarity index 100%
rename from volume/html/image/kibana/generate_report.png
rename to cdn-server/html/image/kibana/generate_report.png
diff --git a/volume/html/image/kibana/logs.png b/cdn-server/html/image/kibana/logs.png
similarity index 100%
rename from volume/html/image/kibana/logs.png
rename to cdn-server/html/image/kibana/logs.png
diff --git a/volume/html/image/kibana/navigation.png b/cdn-server/html/image/kibana/navigation.png
similarity index 100%
rename from volume/html/image/kibana/navigation.png
rename to cdn-server/html/image/kibana/navigation.png
diff --git a/volume/html/image/kibana/save_log.png b/cdn-server/html/image/kibana/save_log.png
similarity index 100%
rename from volume/html/image/kibana/save_log.png
rename to cdn-server/html/image/kibana/save_log.png
diff --git a/volume/html/image/kibana/step_1.png b/cdn-server/html/image/kibana/step_1.png
similarity index 100%
rename from volume/html/image/kibana/step_1.png
rename to cdn-server/html/image/kibana/step_1.png
diff --git a/volume/html/image/kibana/step_2.png b/cdn-server/html/image/kibana/step_2.png
similarity index 100%
rename from volume/html/image/kibana/step_2.png
rename to cdn-server/html/image/kibana/step_2.png
diff --git a/volume/html/image/kibana/visualization_show.png b/cdn-server/html/image/kibana/visualization_show.png
similarity index 100%
rename from volume/html/image/kibana/visualization_show.png
rename to cdn-server/html/image/kibana/visualization_show.png
diff --git a/volume/html/image/kibana/visualization_type.png b/cdn-server/html/image/kibana/visualization_type.png
similarity index 100%
rename from volume/html/image/kibana/visualization_type.png
rename to cdn-server/html/image/kibana/visualization_type.png
diff --git a/volume/html/index.html b/cdn-server/html/index.html
similarity index 100%
rename from volume/html/index.html
rename to cdn-server/html/index.html
diff --git a/volume/html/js/app-api.js b/cdn-server/html/js/app-api.js
similarity index 100%
rename from volume/html/js/app-api.js
rename to cdn-server/html/js/app-api.js
diff --git a/volume/html/js/app-header.js b/cdn-server/html/js/app-header.js
similarity index 100%
rename from volume/html/js/app-header.js
rename to cdn-server/html/js/app-header.js
diff --git a/volume/html/js/app-player.js b/cdn-server/html/js/app-player.js
similarity index 100%
rename from volume/html/js/app-player.js
rename to cdn-server/html/js/app-player.js
diff --git a/volume/html/js/app.js b/cdn-server/html/js/app.js
similarity index 100%
rename from volume/html/js/app.js
rename to cdn-server/html/js/app.js
diff --git a/volume/html/js/vendor/dash.all.min.js b/cdn-server/html/js/vendor/dash.all.min.js
similarity index 100%
rename from volume/html/js/vendor/dash.all.min.js
rename to cdn-server/html/js/vendor/dash.all.min.js
diff --git a/volume/html/js/vendor/foundation.min.js b/cdn-server/html/js/vendor/foundation.min.js
similarity index 100%
rename from volume/html/js/vendor/foundation.min.js
rename to cdn-server/html/js/vendor/foundation.min.js
diff --git a/volume/html/js/vendor/hls.min.js b/cdn-server/html/js/vendor/hls.min.js
similarity index 100%
rename from volume/html/js/vendor/hls.min.js
rename to cdn-server/html/js/vendor/hls.min.js
diff --git a/volume/html/js/vendor/jquery-3.2.1.min.js b/cdn-server/html/js/vendor/jquery-3.2.1.min.js
similarity index 100%
rename from volume/html/js/vendor/jquery-3.2.1.min.js
rename to cdn-server/html/js/vendor/jquery-3.2.1.min.js
diff --git a/volume/html/js/vendor/what-input.js b/cdn-server/html/js/vendor/what-input.js
similarity index 100%
rename from volume/html/js/vendor/what-input.js
rename to cdn-server/html/js/vendor/what-input.js
diff --git a/volume/html/player.shtml b/cdn-server/html/player.shtml
similarity index 100%
rename from volume/html/player.shtml
rename to cdn-server/html/player.shtml
diff --git a/cdn-server/nginx.conf b/cdn-server/nginx.conf
index cbf1996..64b50a1 100644
--- a/cdn-server/nginx.conf
+++ b/cdn-server/nginx.conf
@@ -20,7 +20,7 @@ rtmp {
application hls {
live on;
hls on;
- hls_path /var/www/hls;
+ hls_path /var/www/video/hls;
hls_nested on;
hls_fragment 3;
hls_playlist_length 60;
@@ -32,7 +32,7 @@ rtmp {
application dash {
live on;
dash on;
- dash_path /var/www/dash;
+ dash_path /var/www/video/dash;
dash_fragment 4;
dash_playlist_length 120;
dash_nested on;
@@ -67,7 +67,7 @@ http {
limit_req_zone $binary_remote_addr zone=allips:10m rate=200r/s;
server {
- listen 8080 ssl;
+ listen 8443 ssl;
server_name _;
ssl_certificate /var/run/secrets/self.crt;
@@ -116,7 +116,7 @@ http {
}
location /hls/ {
- root /var/www;
+ root /var/www/video;
add_header Cache-Control no-cache;
add_header 'Access-Control-Allow-Origin' '*' always;
add_header 'Access-Control-Expose-Headers' 'Content-Length';
@@ -127,7 +127,7 @@ http {
}
location /dash/ {
- root /var/www;
+ root /var/www/video;
add_header Cache-Control no-cache;
add_header 'Access-Control-Allow-Origin' '*' always;
add_header 'Access-Control-Expose-Headers' 'Content-Length';
@@ -137,7 +137,7 @@ http {
}
location ~* /dash/.*/index.mpd$ {
- root /var/www;
+ root /var/www/video;
add_header Cache-Control no-cache;
add_header 'Access-Control-Allow-Origin' '*' always;
add_header 'Access-Control-Expose-Headers' 'Content-Length';
@@ -148,7 +148,7 @@ http {
}
location ~* /hls/.*/index.m3u8$ {
- root /var/www;
+ root /var/www/video;
add_header Cache-Control no-cache;
add_header 'Access-Control-Allow-Origin' '*' always;
add_header 'Access-Control-Expose-Headers' 'Content-Length';
diff --git a/cdn-server/schedule.py b/cdn-server/schedule.py
index 04a9935..37be184 100755
--- a/cdn-server/schedule.py
+++ b/cdn-server/schedule.py
@@ -4,9 +4,10 @@
from tornado import web, gen
from messaging import Producer
import time
+import json
KAFKA_TOPIC = "content_provider_sched"
-DASHLS_ROOT = "/var/www"
+DASHLS_ROOT = "/var/www/video"
class ScheduleHandler(web.RequestHandler):
@gen.coroutine
@@ -16,7 +17,21 @@ def get(self):
# schedule producing the stream
print("request received to process stream: "+stream, flush=True)
producer = Producer()
- producer.send(KAFKA_TOPIC, stream)
+ msg={}
+ msg.update({
+ "name":stream.split("/")[1],
+ "parameters": {
+ "renditions":[ ],
+ "codec_type": "AVC"
+ },
+ "output": {
+ "target": "file",
+ "type": stream.split("/")[0]
+ },
+ "live_vod": "vod",
+ "loop": 0
+ })
+ producer.send(KAFKA_TOPIC, json.dumps(msg))
producer.close()
# wait until file is available, return it
diff --git a/cdn-server/shell.sh b/cdn-server/shell.sh
index 777de8b..a7b0c74 100755
--- a/cdn-server/shell.sh
+++ b/cdn-server/shell.sh
@@ -1,6 +1,6 @@
#!/bin/bash -e
-IMAGE="ovc_cdn_service"
+IMAGE="tc_cdn_service"
DIR=$(dirname $(readlink -f "$0"))
OPTIONS=("--volume=${DIR}/../../volume/video/archive:/var/www/archive:ro" "--volume=${DIR}/../../volume/video/dash:/var/www/dash:ro" "--volume=${DIR}/../../volume/video/hls:/var/www/hls:ro")
diff --git a/cdn-server/tasks.py b/cdn-server/tasks.py
index c4ccb4c..0e2e112 100755
--- a/cdn-server/tasks.py
+++ b/cdn-server/tasks.py
@@ -16,7 +16,7 @@ def del_file(path):
@celery.task
def in_out(proPath, srcPath, fileName, count):
i=0
- while i < 10:
+ while i < 20:
if len(os.listdir(proPath)) == int(count) + 1:
try:
with open(os.path.join(srcPath, fileName), "wb") as upload_file:
diff --git a/xcode-server/hardware/CMakeLists.txt b/common/CMakeLists.txt
similarity index 53%
rename from xcode-server/hardware/CMakeLists.txt
rename to common/CMakeLists.txt
index 5c5eb27..bef91ce 100644
--- a/xcode-server/hardware/CMakeLists.txt
+++ b/common/CMakeLists.txt
@@ -1,2 +1,2 @@
-set(service "ovc_hardware_transcode_service")
+set(service "tc_common")
include("${CMAKE_SOURCE_DIR}/script/service.cmake")
diff --git a/common/Dockerfile b/common/Dockerfile
new file mode 100644
index 0000000..3a85b95
--- /dev/null
+++ b/common/Dockerfile
@@ -0,0 +1,19 @@
+
+FROM ubuntu:18.04
+
+RUN apt-get update -q && apt-get install -y -q python3-requests python3-ply python3-psutil && rm -rf /var/lib/apt/lists/*
+
+COPY *.py /home/
+ENV PYTHONIOENCODING=UTF-8
+
+####
+ARG USER=docker
+ARG GROUP=docker
+ARG UID
+ARG GID
+## must use ; here to ignore user exist status code
+RUN [ ${GID} -gt 0 ] && groupadd -f -g ${GID} ${GROUP}; \
+ [ ${UID} -gt 0 ] && useradd -d /home -M -g ${GID} -K UID_MAX=${UID} -K UID_MIN=${UID} ${USER}; \
+ chown -R ${UID}:${GID} /home
+####
+
diff --git a/common/build.sh b/common/build.sh
new file mode 100755
index 0000000..45ec0cd
--- /dev/null
+++ b/common/build.sh
@@ -0,0 +1,5 @@
+#!/bin/bash -e
+
+IMAGE="tc_common"
+DIR=$(dirname $(readlink -f "$0"))
+. "$DIR/../script/build.sh"
diff --git a/common/ffmpegcmd.py b/common/ffmpegcmd.py
new file mode 100755
index 0000000..0055742
--- /dev/null
+++ b/common/ffmpegcmd.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python3
+
+import subprocess
+import json
+
+RENDITIONS_SAMPLE = (
+ # resolution bitrate(kbps) audio-rate(kbps)
+ [3840, 2160, 14000000, 192000],
+ [2560, 1440, 10000000, 192000],
+ [1920, 1080, 5000000, 192000],
+ [1280, 720, 2800000, 192000],
+ [842, 480, 1400000, 128000],
+ [640, 360, 800000, 128000]
+)
+
+default_params={
+ "hls_dash_params": {
+ "duration": 2,
+ "segment_num": 0
+ },
+ "tc_params": {
+ "renditions":[[1920, 1080, 5000000, 192000]],
+ "codec_type":"AVC",
+ "gop_size": "100",
+ "framerate": "30",
+ "bframe": "2",
+ "preset": "veryfast",
+ "profile": "578",
+ "level": "30",
+ "refs": "2",
+ "forced_idr": "1",
+ "target_type": "mp4"
+ }
+}
+
+codec_setting={
+ "sw": {
+ "AVC": "libx264",
+ "HEVC": "libsvt_hevc"
+ },
+ "vaapi": {
+ "AVC": "h264_vaapi",
+ "HEVC": "hevc_vaapi"
+ },
+ "qsv": {
+ "AVC": "h264_qsv",
+ "HEVC": "hevc_qsv"
+ }
+}
+class FFMpegCmd:
+ def __init__(self, in_params, out_params, streaming_type, params, loop=0, acc_type="sw", device=None):
+ self._in_file=in_params
+ self._target=out_params
+ self._tc_params=params if params else default_params["tc_params"]
+ self._hls_dash_params=params["hls_dash_params"] if "hls_dash_params" in params.keys() else default_params["hls_dash_params"]
+ self._acc_type=acc_type
+
+ self._segment_num=self._hls_dash_params["segment_num"]
+ self._duration=self._hls_dash_params["duration"]
+
+ self._stream_info=None
+ self._streaming_type=streaming_type
+
+ self._renditions=self._tc_params["renditions"] if self._tc_params["renditions"] else RENDITIONS_SAMPLE
+
+ self._codec_type = self._tc_params["codec_type"]
+
+ self._cmd_base=["ffmpeg", "-hide_banner", "-y"]
+ if loop:
+ self._cmd_base = self._cmd_base + ["-stream_loop", "-1"]
+
+ self._device=device
+ if not device and self._acc_type != "sw":
+ self._device = "/dev/dri/renderD128"
+
+ if self._acc_type == "vaapi":
+ self._cmd_base = self._cmd_base + ["-hwaccel", "vaapi", "-hwaccel_device", self._device, "-hwaccel_output_format", "vaapi"]
+ elif self._acc_type == "qsv":
+ self._cmd_base = self._cmd_base + ["-hwaccel", "qsv", "-qsv_device", self._device, "-c:v", "h264_qsv"]
+
+ self._cmd_base = self._cmd_base + ["-i", self._in_file]
+
+ self._keyframe_interval = 0
+ self._frame_height = 0
+ self._clip_v_duration = 0
+ self._clip_a_duration = 0
+
+ self._segment_target_duration = self._duration # try to create a new segment every X seconds
+ self._max_bitrate_ratio = 1.07 # maximum accepted bitrate fluctuations
+ self._rate_monitor_buffer_ratio = 1.5 # maximum buffer size between bitrate conformance checks
+
+ self._default_threshold = 4
+ self.stream_info(self._in_file)
+ self._codec = self._get_codec()
+ # hls and dash
+ self._cmd_static = ["-c:v", self._codec, "-profile:v", "main", "-sc_threshold", "0", "-strict", "-2"]
+ if self._acc_type != "sw":
+ self._cmd_static = ["-profile:v", "main", "-c:v", self._codec]
+ self._cmd_static += ["-g", str(self._keyframe_interval)]
+
+
+ def _to_kps(self, bitrate):
+ return str(int(bitrate/1000))+"k"
+
+ def _get_codec(self):
+ return codec_setting[self._acc_type][self._codec_type]
+
+ def stream_info(self, in_file):
+ ffprobe_cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", "-show_streams", in_file]
+ p = subprocess.Popen(ffprobe_cmd, stdout=subprocess.PIPE)
+ p.wait()
+ clip_info = json.loads(p.stdout.read().decode("utf-8"))
+
+ for item in clip_info["streams"]:
+ if item["codec_type"] == "video":
+ self._keyframe_interval = int(eval(item["avg_frame_rate"])+0.5)
+ self._frame_height = item["height"]
+ self._clip_v_duration = eval(item["duration"])
+ if item["codec_type"] == "audio":
+ self._clip_a_duration = eval(item["duration"])
+
+ if self._segment_num != 0:
+ segment_duration = (int)((self._clip_v_duration+2.0)/self._segment_num)
+ if segment_duration < self._segment_target_duration:
+ self._segment_target_duration = segment_duration
+
+ def _hls(self):
+ cmd_hls = ["-hls_time", str(self._segment_target_duration), "-hls_list_size", "0"]
+ cmd_fade_in_out = ["-an"]
+ cmd_abr=[]
+ master_playlist = "#EXTM3U" + "\n" + "#EXT-X-VERSION:3" +"\n" + "#" + "\n"
+ count = 0
+ for item in self._renditions:
+ width = item[0]
+ height = item[1]
+ v_bitrate = self._to_kps(item[2])
+ a_bitrate = self._to_kps(item[3])
+ maxrate = self._to_kps(item[2] * self._max_bitrate_ratio)
+ name = str(height) + "p"
+ if self._frame_height < height:
+ continue
+
+ cmd_1 = ["-vf", "scale=w="+str(width)+":"+"h="+str(height)]
+ if self._acc_type == "vaapi":
+ cmd_1 = ["-vf", "scale_vaapi=w="+str(width)+":"+"h="+str(height)+":format=nv12"]
+ elif self._acc_type == "qsv":
+ cmd_1 = ["-vf", "scale_qsv=w="+str(width)+":"+"h="+str(height)+":format=nv12"]
+
+ cmd_2 = ["-b:v", v_bitrate, "-maxrate", maxrate]
+ cmd_3 = ["-f", self._streaming_type]
+ cmd_4 = ["-hls_segment_filename", self._target+"/"+name+"_"+"%03d.ts", self._target+"/"+name+".m3u8"]
+ master_playlist += "#EXT-X-STREAM-INF:BANDWIDTH="+str(item[2])+","+"RESOLUTION="+str(width)+"x"+str(height)+"\n"+name+".m3u8"+"\n"
+ cmd_abr += cmd_1 + self._cmd_static + cmd_2 + cmd_fade_in_out + cmd_3 + cmd_hls + cmd_4
+
+ count += 1
+ if count > self._default_threshold:
+ break
+ with open(self._target+"/"+"index.m3u8", "w", encoding='utf-8') as f:
+ f.write(master_playlist)
+ return cmd_abr
+
+ def _dash(self):
+ cmd_dash = ["-use_timeline", "1", "-use_template", "1", "-seg_duration", str(self._segment_target_duration), "-adaptation_sets", "id=0,streams=v"]
+ cmd_abr=[]
+ cmd_scale=[]
+
+ count = 0
+ for item in self._renditions:
+ width = item[0]
+ height = item[1]
+ v_bitrate = self._to_kps(item[2])
+ a_bitrate = self._to_kps(item[3])
+ maxrate = self._to_kps(item[2] * self._max_bitrate_ratio)
+ if self._frame_height < height:
+ continue
+ cmd_1 = ["-map", "[out"+str(count) +"]", "-b:v"+":"+str(count), v_bitrate, "-maxrate"+":"+str(count), maxrate]
+ if self._acc_type == "vaapi":
+ cmd_scale += [";", "[mid"+str(count) +"]", "scale_vaapi=w="+str(width)+":"+"h="+str(height)+":format=nv12","[out"+str(count) +"]"]
+ elif self._acc_type == "qsv":
+ cmd_scale += [";", "[mid"+str(count) +"]", "scale_qsv=w="+str(width)+":"+"h="+str(height)+":format=nv12","[out"+str(count) +"]"]
+ else:
+ cmd_scale += [";", "[mid"+str(count) +"]", "scale=w="+str(width)+":"+"h="+str(height),"[out"+str(count) +"]"]
+ cmd_abr += cmd_1
+ count += 1
+ if count > self._default_threshold:
+ break
+ cmd_scale = ["[0:v]split="+str(count)]+["[mid"+str(_id) +"]" for _id in range(count)]+cmd_scale
+ return ["-filter_complex"] +["".join(cmd_scale)]+ self._cmd_static + cmd_abr +["-f", "dash"] + cmd_dash + ["-y", self._target+"/"+"index.mpd"]
+
+ def _tc(self):
+ cmd_1 = []
+ params = self._tc_params
+ stream_name = self._target.split("/")[-1].split(".")[0]
+ for item in self._renditions:
+ width = item[0]
+ height = item[1]
+ v_bitrate = self._to_kps(item[2])
+ a_bitrate = self._to_kps(item[3])
+ maxrate = self._to_kps(item[2] * self._max_bitrate_ratio)
+ name= self._target+"/"+stream_name+self._codec_type+"_"+str(height)+"p."+self._streaming_type if self._streaming_type == "mp4" else self._target+"_"+self._codec_type+str(height)+"p"
+
+ if self._acc_type == "vaapi":
+ cmd_1 += ["-vf", "scale_vaapi=w="+str(width)+":"+"h="+str(height)+":format=nv12", "-c:v", self._codec]
+ cmd_1 += ["-profile:v", "main", "-b:v", v_bitrate, "-maxrate", v_bitrate, "-r", params["framerate"],"-g", params["gop_size"], "-bf", params["bframe"], "-an", "-f", self._streaming_type, name]
+ elif self._acc_type == "qsv":
+ cmd_1 += ["-vf", "scale_qsv=w="+str(width)+":"+"h="+str(height)+":format=nv12", "-c:v", self._codec]
+ cmd_1 += ["-profile:v", "main", "-b:v", v_bitrate, "-maxrate", v_bitrate, "-r", params["framerate"],"-g", params["gop_size"], "-bf", params["bframe"], "-an", "-f", self._streaming_type, name]
+ else:
+ cmd_1 += ["-vf", "scale=w="+str(width)+":"+"h="+str(height),"-c:v", self._codec, "-b:v", v_bitrate]
+ cmd_1 += ["-r", params["framerate"],"-g", params["gop_size"], "-bf", params["bframe"], "-refs", params["refs"], "-preset", params["preset"], "-forced-idr", params["forced_idr"], "-an", "-f", self._streaming_type, name]
+
+ return cmd_1
+
+ def cmd(self):
+ cmd = []
+ if self._streaming_type == "hls":
+ cmd = self._cmd_base + self._hls()
+ if self._streaming_type == "dash":
+ cmd = self._cmd_base + self._dash()
+ elif self._streaming_type == "mp4":
+ cmd = self._cmd_base + self._tc()
+ return cmd
+
diff --git a/xcode-server/hardware/messaging.py b/common/messaging.py
similarity index 100%
rename from xcode-server/hardware/messaging.py
rename to common/messaging.py
diff --git a/common/shell.sh b/common/shell.sh
new file mode 100755
index 0000000..633016d
--- /dev/null
+++ b/common/shell.sh
@@ -0,0 +1,6 @@
+#!/bin/bash -e
+
+IMAGE="tc_common"
+DIR=$(dirname $(readlink -f "$0"))
+
+. "$DIR/../script/shell.sh"
diff --git a/xcode-server/hardware/zkstate.py b/common/zkstate.py
similarity index 100%
rename from xcode-server/hardware/zkstate.py
rename to common/zkstate.py
diff --git a/content-provider/CMakeLists.txt b/content-provider/CMakeLists.txt
index e2e455e..8623124 100644
--- a/content-provider/CMakeLists.txt
+++ b/content-provider/CMakeLists.txt
@@ -1,2 +1,2 @@
-set(service "ovc_content_provider_archive")
+set(service "tc_content_provider_archive")
include("${CMAKE_SOURCE_DIR}/script/service.cmake")
diff --git a/content-provider/Dockerfile b/content-provider/Dockerfile
index f1f5c07..f8d1460 100644
--- a/content-provider/Dockerfile
+++ b/content-provider/Dockerfile
@@ -1,3 +1,13 @@
FROM ubuntu:18.04
-RUN apt-get update && apt-get install -y wget ffmpeg
+RUN apt-get update && apt-get install -y wget ffmpeg && rm -rf /var/lib/apt/lists/*
+
+####
+ARG UID
+ARG GID
+## must use ; here to ignore user exist status code
+RUN [ ${GID} -gt 0 ] && groupadd -f -g ${GID} docker; \
+ [ ${UID} -gt 0 ] && useradd -d /home/docker -g ${GID} -K UID_MAX=${UID} -K UID_MIN=${UID} docker; \
+ chown -R ${UID}:${GID} /home
+USER ${UID}
+####
diff --git a/content-provider/build.sh b/content-provider/build.sh
index 62c6b63..bf7484f 100755
--- a/content-provider/build.sh
+++ b/content-provider/build.sh
@@ -1,20 +1,35 @@
#!/bin/bash -e
-IMAGE="ovc_content_provider_archive"
+IMAGE="tc_content_provider_archive"
DIR=$(dirname $(readlink -f "$0"))
-sample_video="http://distribution.bbb3d.renderfarming.net/video/mp4"
-clips=("$sample_video/bbb_sunflower_1080p_30fps_normal.mp4")
-
+LICENSE="https://www.pexels.com/photo-license"
+clips=(
+ https://www.pexels.com/video/3115738/download
+ https://www.pexels.com/video/1110140/download
+ https://www.pexels.com/video/2644023/download
+ https://www.pexels.com/video/2257025/download
+ https://www.pexels.com/video/3743056/download
+ https://www.pexels.com/video/5419496/download
+ https://www.pexels.com/video/2324293/download
+ https://www.pexels.com/video/5413799/download
+ https://www.pexels.com/video/3063911/download
+)
+
case "$(cat /proc/1/sched | head -n 1)" in
*build.sh*)
cd /mnt
- mkdir -p archive dash hls
+ mkdir -p /mnt/raw
for clip in "${clips[@]}"; do
- clip_name="${clip/*\//}"
- clip_name="${clip_name/*=/}"
- clip_name="${clip_name/.mp4/}.mp4"
+ clip_name="$(echo $clip | cut -f5 -d/).mp4"
if test ! -f "archive/$clip_name"; then
- wget -O "archive/$clip_name" "$clip"
+ if test "$reply" == ""; then
+ printf "\n\n\nThe sample requires you to have a set of video clips as the transcoding and streaming source. Please accept the license terms from $LICENSE to start downloading the video clips.\n\nThe terms and conditions of the license apply. Intel does not grant any rights to the video files.\n\n\nPlease type \"accept\" or anything else to skip the download.\n"
+ read reply
+ fi
+ if test "$reply" == "accept"; then
+ echo "Downloading $clip..."
+ wget -q -U "XXX YYY" -O "archive/$clip_name" "$clip"
+ fi
fi
done
for clip in `find archive -name "*.mp4" -print`; do
@@ -23,12 +38,16 @@ case "$(cat /proc/1/sched | head -n 1)" in
ffmpeg -i "archive/$clip_name" -vf "thumbnail,scale=640:360" -frames:v 1 -y "archive/$clip_name".png
fi
done
+ for clip in `find archive -name "*.mp4" -print`; do
+ clip_name="${clip/*\//}"
+ if test ! -f "raw/$clip_name".yuv; then
+ ffmpeg -i "archive/$clip_name" -vcodec rawvideo -an -frames:v 600 -y "raw/$clip_name".yuv
+ fi
+ done
wait
;;
*)
mkdir -p "$DIR/../volume/video/archive"
- mkdir -p "$DIR/../volume/video/dash"
- mkdir -p "$DIR/../volume/video/hls"
. "$DIR/../script/build.sh"
. "$DIR/shell.sh" /home/build.sh $@
;;
diff --git a/content-provider/shell.sh b/content-provider/shell.sh
index 07dbf94..fb46716 100755
--- a/content-provider/shell.sh
+++ b/content-provider/shell.sh
@@ -1,6 +1,6 @@
#!/bin/bash -e
-IMAGE="ovc_content_provider_archive"
+IMAGE="tc_content_provider_archive"
DIR=$(dirname $(readlink -f "$0"))
OPTIONS=("--volume=$DIR/../volume/video:/mnt:rw" "--volume=$DIR:/home:ro")
diff --git a/deployment/CMakeLists.txt b/deployment/CMakeLists.txt
index 162c01f..a910e81 100644
--- a/deployment/CMakeLists.txt
+++ b/deployment/CMakeLists.txt
@@ -1 +1,5 @@
include("${CMAKE_SOURCE_DIR}/script/scan-all.cmake")
+
+if(NOT REGISTRY)
+ add_custom_target(update ${CMAKE_HOME_DIRECTORY}/script/update-image.sh)
+endif()
diff --git a/deployment/certificate/CMakeLists.txt b/deployment/certificate/CMakeLists.txt
index ca21d02..418c4f2 100644
--- a/deployment/certificate/CMakeLists.txt
+++ b/deployment/certificate/CMakeLists.txt
@@ -1,2 +1,2 @@
-set(service "ovc_self_certificate")
+set(service "tc_self_certificate")
include("${CMAKE_SOURCE_DIR}/script/service.cmake")
diff --git a/deployment/certificate/Dockerfile b/deployment/certificate/Dockerfile
index 8d5adcd..ddd0736 100644
--- a/deployment/certificate/Dockerfile
+++ b/deployment/certificate/Dockerfile
@@ -1,3 +1,13 @@
FROM ubuntu:18.04
RUN apt-get update && apt-get install -y openssh-server
+
+####
+ARG UID
+ARG GID
+## must use ; here to ignore user exist status code
+RUN [ ${GID} -gt 0 ] && groupadd -f -g ${GID} docker; \
+ [ ${UID} -gt 0 ] && useradd -d /home/docker -g ${GID} -K UID_MAX=${UID} -K UID_MIN=${UID} docker; \
+ echo
+USER ${UID}
+####
diff --git a/deployment/certificate/build.sh b/deployment/certificate/build.sh
index e57c154..eed5005 100755
--- a/deployment/certificate/build.sh
+++ b/deployment/certificate/build.sh
@@ -1,6 +1,6 @@
#!/bin/bash -e
-IMAGE="ovc_self_certificate"
+IMAGE="tc_self_certificate"
DIR=$(dirname $(readlink -f "$0"))
. "$DIR/../../script/build.sh"
diff --git a/deployment/certificate/self-sign.sh b/deployment/certificate/self-sign.sh
index 29dc664..05adebd 100755
--- a/deployment/certificate/self-sign.sh
+++ b/deployment/certificate/self-sign.sh
@@ -1,6 +1,6 @@
#!/bin/bash -e
-IMAGE="ovc_self_certificate"
+IMAGE="tc_self_certificate"
DIR=$(dirname $(readlink -f "$0"))
case "$(cat /proc/1/sched | head -n 1)" in
diff --git a/deployment/certificate/shell.sh b/deployment/certificate/shell.sh
index 9674d40..c91244e 100755
--- a/deployment/certificate/shell.sh
+++ b/deployment/certificate/shell.sh
@@ -1,6 +1,6 @@
#!/bin/bash -e
-IMAGE="ssai_self_certificate"
+IMAGE="tc_self_certificate"
DIR=$(dirname $(readlink -f "$0"))
. "$DIR/../../script/shell.sh"
diff --git a/deployment/docker-swarm/CMakeLists.txt b/deployment/docker-swarm/CMakeLists.txt
deleted file mode 100644
index 74f8b99..0000000
--- a/deployment/docker-swarm/CMakeLists.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-set(service "docker_swarm")
-include("${CMAKE_SOURCE_DIR}/script/deployment.cmake")
-set(service "docker_compose")
-include("${CMAKE_SOURCE_DIR}/script/deployment.cmake")
diff --git a/deployment/docker-swarm/docker-compose.yml b/deployment/docker-swarm/docker-compose.yml
deleted file mode 100644
index 154f9a5..0000000
--- a/deployment/docker-swarm/docker-compose.yml
+++ /dev/null
@@ -1,105 +0,0 @@
-version: '3.1'
-
-services:
-
- redis-service:
- image: redis:latest
- ports:
- - "6379:6379"
- restart: always
- deploy:
- replicas: 1
- command:
- redis-server
-
- zookeeper-service:
- image: zookeeper:latest
- environment:
- ZOOKEEPER_SERVER_ID: 1
- ZOOKEEPER_CLIENT_PORT: '2181'
- ZOOKEEPER_TICK_TIME: '2000'
- ZOOKEEPER_HEAP_OPTS: '-Xmx2048m -Xms2048m'
- ZOOKEEPER_MAX_CLIENT_CNXNS: '20000'
- ZOOKEEPER_LOG4J_LOGGERS: 'zookeepr=ERROR'
- ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: 'ERROR'
- restart: always
- deploy:
- replicas: 1
-
- kafka-service:
- image: wurstmeister/kafka:latest
- depends_on:
- - zookeeper-service
- environment:
- KAFKA_BROKER_ID: 1
- KAFKA_ADVERTISED_HOST_NAME: 'kafka-service'
- KAFKA_ADVERTISED_PORT: '9092'
- KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-service:2181'
- KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka-service:9092'
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'PLAINTEXT:PLAINTEXT'
- KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
- KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
- KAFKA_DEFAULT_REPLICATION_FACTOR: 1
- KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
- KAFKA_NUM_PARTITIONS: 16
- KAFKA_CREATE_TOPICS: 'content_provider_sched:16:1'
- KAFKA_LOG_RETENTION_HOURS: 8
- KAFKA_HEAP_OPTS: '-Xmx1024m -Xms1024m'
- KAFKA_LOG4J_LOGGERS: 'kafka=ERROR,kafka.controller=ERROR,state.change.logger=ERROR,org.apache.kafka=ERROR'
- KAFKA_LOG4J_ROOT_LOGLEVEL: 'ERROR'
- restart: always
- deploy:
- replicas: 1
-
- cdn-service:
- image: ovc_cdn_service:latest
- ports:
- - "443:8080"
- volumes:
- - ${HTML_VOLUME}:/var/www/html:ro
- - ${VIDEO_ARCHIVE_VOLUME}:/var/www/archive:rw
- - ${VIDEO_DASH_VOLUME}:/var/www/dash:rw
- - ${VIDEO_HLS_VOLUME}:/var/www/hls:rw
- - ${NGINX_LOG_VOLUME}:/var/www/log:rw
- depends_on:
- - kafka-service
- deploy:
- replicas: 1
- secrets:
- - source: self_crt
- target: /var/run/secrets/self.crt
- uid: ${USER_ID}
- gid: ${GROUP_ID}
- mode: 0444
- - source: self_key
- target: /var/run/secrets/self.key
- uid: ${USER_ID}
- gid: ${GROUP_ID}
- mode: 0440
-
- vod-transcode-service:
- image: ovc_software_transcode_service:latest
- volumes:
- - ${VIDEO_ARCHIVE_VOLUME}:/var/www/archive:ro
- - ${VIDEO_DASH_VOLUME}:/var/www/dash:rw
- - ${VIDEO_HLS_VOLUME}:/var/www/hls:rw
- deploy:
- replicas: 2
- depends_on:
- - kafka-service
- - zookeeper-service
-
- live-transcode-service:
- image: ovc_software_transcode_service:latest
- volumes:
- - ${VIDEO_ARCHIVE_VOLUME}:/var/www/archive:ro
- depends_on:
- - cdn-service
- command: |
- bash -c 'ffmpeg -re -stream_loop -1 -i /var/www/archive/bbb_sunflower_1080p_30fps_normal.mp4 -vf scale=2560:1440 -c:v libsvt_hevc -b:v 15M -forced-idr 1 -f flv rtmp://cdn-service/hls/big_buck_bunny_2560x1440 -vf scale=1920:1080 -c:v libsvt_hevc -b:v 10M -forced-idr 1 -f flv rtmp://cdn-service/hls/big_buck_bunny_1920x1080 -vf scale=1280:720 -c:v libx264 -b:v 8M -f flv rtmp://cdn-service/hls/big_buck_bunny_1280x720 -vf scale=854:480 -c:v libx264 -b:v 6M -f flv rtmp://cdn-service/hls/big_buck_bunny_854x480 -abr_pipeline'
-
-secrets:
- self_key:
- file: ${SECRETS_VOLUME}/self.key
- self_crt:
- file: ${SECRETS_VOLUME}/self.crt
diff --git a/deployment/docker-swarm/start.sh b/deployment/docker-swarm/start.sh
deleted file mode 100755
index ed5df8c..0000000
--- a/deployment/docker-swarm/start.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash -e
-
-DIR=$(dirname $(readlink -f "$0"))
-export VIDEO_ARCHIVE_VOLUME=$(readlink -f "$DIR/../../volume/video/archive")
-export VIDEO_DASH_VOLUME=$(readlink -f "$DIR/../../volume/video/dash")
-export VIDEO_HLS_VOLUME=$(readlink -f "$DIR/../../volume/video/hls")
-export NGINX_LOG_VOLUME=$(readlink -f "/var/log/nginx")
-export HTML_VOLUME=$(readlink -f "$DIR/../../volume/html")
-export SECRETS_VOLUME=$(readlink -f "$DIR/../certificate")
-
-sudo docker container prune -f
-sudo docker volume prune -f
-sudo docker network prune -f
-sudo rm -rf "${VIDEO_DASH_VOLUME}" "${VIDEO_HLS_VOLUME}"
-sudo mkdir -p "${VIDEO_DASH_VOLUME}" "${VIDEO_HLS_VOLUME}" "${NGINX_LOG_VOLUME}"
-
-yml="$DIR/docker-compose.$(hostname).yml"
-test -f "$yml" || yml="$DIR/docker-compose.yml"
-
-case "$1" in
-docker_compose)
- dcv="$(docker-compose --version | cut -f3 -d' ' | cut -f1 -d',')"
- mdcv="$(printf '%s\n' $dcv 1.20 | sort -r -V | head -n 1)"
- if test "$mdcv" = "1.20"; then
- echo ""
- echo "docker-compose >=1.20 is required."
- echo "Please upgrade docker-compose at https://docs.docker.com/compose/install."
- echo ""
- exit 0
- fi
- export USER_ID=$(id -u)
- export GROUP_ID=$(id -g)
- sudo -E docker-compose -f "$yml" -p ovc --compatibility up
- ;;
-*)
- export USER_ID=$(id -u)
- export GROUP_ID=$(id -g)
- "$DIR/../certificate/self-sign.sh"
- sudo -E docker stack deploy -c "$yml" ovc
- ;;
-esac
diff --git a/deployment/docker-swarm/stop.sh b/deployment/docker-swarm/stop.sh
deleted file mode 100755
index d3f4b54..0000000
--- a/deployment/docker-swarm/stop.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash -e
-
-DIR=$(dirname $(readlink -f "$0"))
-
-yml="$DIR/docker-compose.$(hostname).yml"
-test -f "$yml" || yml="$DIR/docker-compose.yml"
-case "$1" in
-docker_compose)
- dcv="$(docker-compose --version | cut -f3 -d' ' | cut -f1 -d',')"
- mdcv="$(printf '%s\n' $dcv 1.10 | sort -r -V | head -n 1)"
- if test "$mdcv" = "1.10"; then
- echo ""
- echo "docker-compose >=1.10 is required."
- echo "Please upgrade docker-compose at https://docs.docker.com/compose/install."
- echo ""
- exit 0
- fi
- sudo docker-compose -f "$yml" -p ovc --compatibility down
- ;;
-*)
- sudo docker stack rm ovc
- ;;
-esac
-
-sudo docker container prune -f
-sudo docker volume prune -f
-sudo docker network prune -f
diff --git a/deployment/kubernetes/CMakeLists.txt b/deployment/kubernetes/CMakeLists.txt
index e85ab85..248dfe8 100644
--- a/deployment/kubernetes/CMakeLists.txt
+++ b/deployment/kubernetes/CMakeLists.txt
@@ -1,2 +1,11 @@
-set(service "kubernetes")
-include("${CMAKE_SOURCE_DIR}/script/deployment.cmake")
+set(service "pv")
+include("${CMAKE_SOURCE_DIR}/script/service.cmake")
+include("${CMAKE_SOURCE_DIR}/script/scan-all.cmake")
+add_custom_target(volume ${CMAKE_CURRENT_SOURCE_DIR}/mkvolume.sh)
+
+# add cleanup files
+file(GLOB m4files "${CMAKE_CURRENT_SOURCE_DIR}/*.yaml.m4")
+foreach(m4file ${m4files})
+ string(REPLACE ".yaml.m4" ".yaml" yamlfile "${m4file}")
+ set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${yamlfile}")
+endforeach(m4file)
diff --git a/deployment/kubernetes/README.md b/deployment/kubernetes/README.md
new file mode 100644
index 0000000..5ef65d9
--- /dev/null
+++ b/deployment/kubernetes/README.md
@@ -0,0 +1,41 @@
+
+The CDN-Transcode sample can be deployed with Kubernetes.
+
+### Kubernetes Setup
+
+- Follow the [instructions](https://kubernetes.io/docs/setup) to setup your Kubernetes cluster.
+
+- Optional: setup password-less access from the Kubernetes controller to each worker node (required by `make update`):
+
+```
+ssh-keygen
+ssh-copy-id
+```
+
+- Start/stop services as follows:
+
+```
+mkdir build
+cd build
+cmake ..
+make
+make update # optional for private docker registry
+make volume
+make start_kubernetes
+make stop_kubernetes
+```
+
+---
+
+The command ```make update``` uploads the sample images to each worker node. If you prefer to use a private docker registry, configure the sample, `cmake -DREGISTRY= ..`, to push images to the private registry after each build.
+- The `make volume` command creates local persistent volumes under the `/tmp` directory of the first two Kubernetes workers. This is a temporary solution for quick sample deployment. For scalability beyond a two-node cluster, consider rewriting the persistent volume scripts.
+
+---
+
+### See Also
+
+- [Helm Charts](helm/cdn-transcode/README.md)
+- [CMake Options](../../doc/cmake.md)
+- [Reference Architecture](https://networkbuilders.intel.com/solutionslibrary/container-bare-metal-for-2nd-generation-intel-xeon-scalable-processor)
+
+
diff --git a/deployment/kubernetes/build.sh b/deployment/kubernetes/build.sh
new file mode 100755
index 0000000..a1b6515
--- /dev/null
+++ b/deployment/kubernetes/build.sh
@@ -0,0 +1,23 @@
+#!/bin/bash -e
+
+DIR=$(dirname $(readlink -f "$0"))
+
+rm -rf "$DIR/../../volume/video/cache"
+mkdir -p "$DIR/../../volume/video/cache/hls" "$DIR/../../volume/video/cache/dash"
+
+# make sure kubectl is functional
+kubectl get node >/dev/null 2>/dev/null || exit 0
+
+hosts=($(kubectl get node -l vcac-zone!=yes -o custom-columns=NAME:metadata.name,STATUS:status.conditions[-1].type,TAINT:spec.taints | grep " Ready " | grep -v "NoSchedule" | cut -f1 -d' '))
+
+if test ${#hosts[@]} -eq 0; then
+ printf "\nFailed to locate worker node(s) for shared storage\n\n"
+ exit -1
+elif test ${#hosts[@]} -lt 2; then
+ hosts=(${hosts[0]} ${hosts[0]})
+fi
+
+. "$DIR/volume-info.sh" "${hosts[@]}"
+for pv in $(find "${DIR}" -maxdepth 1 -name "*-pv.yaml.m4" -print); do
+ m4 $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}" "${pv}" > "${pv/.m4/}"
+done
diff --git a/deployment/kubernetes/docker-compose-template.yml b/deployment/kubernetes/docker-compose-template.yml
deleted file mode 100644
index 2680631..0000000
--- a/deployment/kubernetes/docker-compose-template.yml
+++ /dev/null
@@ -1,153 +0,0 @@
-version: '3.1'
-
-services:
-
- redis-service:
- image: redis:latest
- ports:
- - "6379:6379"
- command:
- redis-server
- deploy:
- replicas: 1
- resources:
- limits:
- cpus: "2"
- memory: 1000M
- reservations:
- cpus: "1"
- memory: 500M
- placement:
- constraints:
- - node.hostname == master.machine
-
- zookeeper-service:
- image: zookeeper:latest
- ports:
- - "2181:2181"
- environment:
- ZOOKEEPER_SERVER_ID: 1
- ZOOKEEPER_CLIENT_PORT: '2181'
- ZOOKEEPER_TICK_TIME: '2000'
- ZOOKEEPER_HEAP_OPTS: '-Xmx2048m -Xms2048m'
- ZOOKEEPER_MAX_CLIENT_CNXNS: '20000'
- ZOOKEEPER_LOG4J_LOGGERS: 'zookeepr=ERROR'
- ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: 'ERROR'
- restart: always
- deploy:
- replicas: 1
- resources:
- limits:
- cpus: "2"
- memory: 1000M
- reservations:
- cpus: "1"
- memory: 500M
- placement:
- constraints:
- - node.hostname == master.machine
- labels:
- kompose.service.type: NodePort
-
- kafka-service:
- image: wurstmeister/kafka:latest
- ports:
- - "9092:9092"
- depends_on:
- - zookeeper-service
- environment:
- KAFKA_BROKER_ID: 1
- KAFKA_ADVERTISED_HOST_NAME: 'kafka-service'
- KAFKA_ADVERTISED_PORT: '9092'
- KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-service:2181'
- KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka-service:9092'
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'PLAINTEXT:PLAINTEXT'
- KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
- KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
- KAFKA_DEFAULT_REPLICATION_FACTOR: 1
- KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
- KAFKA_NUM_PARTITIONS: 16
- KAFKA_CREATE_TOPICS: 'content_provider_sched:16:1'
- KAFKA_LOG_RETENTION_HOURS: 8
- KAFKA_HEAP_OPTS: '-Xmx1024m -Xms1024m'
- KAFKA_LOG4J_LOGGERS: 'kafka=ERROR,kafka.controller=ERROR,state.change.logger=ERROR,org.apache.kafka=ERROR'
- KAFKA_LOG4J_ROOT_LOGLEVEL: 'ERROR'
- restart: always
- deploy:
- replicas: 1
- resources:
- limits:
- cpus: "2"
- memory: 1000M
- reservations:
- cpus: "1"
- memory: 500M
- placement:
- constraints:
- - node.hostname == master.machine
- labels:
- kompose.service.type: NodePort
-
- cdn-service:
- image: ovc_cdn_service:latest
- ports:
- - "8080:8080"
- - "1935:1935"
- depends_on:
- - kafka-service
- deploy:
- replicas: 1
- resources:
- limits:
- cpus: "3"
- memory: 3000M
- reservations:
- cpus: "1.5"
- memory: 1500M
- placement:
- constraints:
- - node.hostname == master.machine
- command: ["bash", "-c", "/home/main.py&/usr/local/sbin/nginx"]
- labels:
- kompose.service.type: NodePort
- kompose.image-pull-policy: IfNotPresent
-
- vod-service:
- image: ovc_software_transcode_service:latest
- deploy:
- replicas: 1
- resources:
- limits:
- cpus: "6"
- memory: 6000M
- reservations:
- cpus: "3"
- memory: 3000M
- placement:
- constraints:
- - node.hostname == master.machine
- depends_on:
- - kafka-service
- - zookeeper-service
- command: ["bash", "-c", "/home/main.py"]
- labels:
- kompose.image-pull-policy: IfNotPresent
-
- live-service:
- image: ovc_software_transcode_service:latest
- deploy:
- replicas: 1
- resources:
- limits:
- cpus: "6"
- memory: 6000M
- reservations:
- cpus: "3"
- memory: 3000M
- placement:
- constraints:
- - node.hostname == master.machine
- depends_on:
- - cdn-service
- labels:
- kompose.image-pull-policy: IfNotPresent
diff --git a/deployment/kubernetes/frame_config.py b/deployment/kubernetes/frame_config.py
deleted file mode 100755
index a743df3..0000000
--- a/deployment/kubernetes/frame_config.py
+++ /dev/null
@@ -1,495 +0,0 @@
-#!/usr/bin/python3
-
-import wx
-import wx.xrc
-import re
-import threading
-import os
-import sys
-from functools import reduce
-import update_yaml
-
-class MyFrame_Config (wx.Frame):
-
- def __init__(self, parent, nfs_server, volume_directory, video_list):
- self.nfs_server = nfs_server
- self.volume_directory = volume_directory
- self.video_list = video_list
-
- self.choice_list = []
- self.node_dict = {}
- basic_info = os.popen("kubectl describe node").read()
- index_list = [i.start() for i in re.finditer("Name:", basic_info)]
- for i in range(len(index_list)):
- cpu_info = re.findall(
- "(\d+)", os.popen("kubectl describe node | awk -F ' ' '$1==\"cpu\"' |awk 'NR==" + str(i+1) + "'").read())
- memory_info = re.findall(
- "(\d+)", os.popen("kubectl describe node | awk -F ' ' '$1==\"memory\" {print $0}'").read())
- cpu = int(int(re.search(
- "cpu:\s+(\d+)", basic_info[index_list[i]: -1]).group(1)) - int(cpu_info[0])/1000)
- memory = int((int(re.search(
- "memory:\s+(\d+)", basic_info[index_list[i]: -1]).group(1)) / 1024 - int(memory_info[0])))
- if cpu > 0 and memory > 0:
- self.choice_list.append({"nodename": re.search(
- "Name:\s+(.+)", basic_info[index_list[i]: -1]).group(1), "cpu": cpu, "memory": memory})
- self.node_dict[re.search(
- "Name:\s+(.+)", basic_info[index_list[i]: -1]).group(1)] = {"cpu": cpu, "memory": memory}
-
- self.setsize_num = 0
- self.live_num = 0
- self.vod_num = 0
- wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=u"config", pos=wx.DefaultPosition, size=wx.Size(
- 924, 525), style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)
- self.Bind(wx.EVT_CLOSE, self.OnClose)
-
- self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)
- self.SetBackgroundColour(
- wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW))
-
- frame_bSizer = wx.BoxSizer(wx.VERTICAL)
-
- self.config_panel = wx.Panel(
- self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.BORDER_RAISED)
- config_bSizer = wx.BoxSizer(wx.HORIZONTAL)
-
- self.pod_panel = wx.ScrolledWindow(
- self.config_panel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.HSCROLL | wx.VSCROLL)
- self.pod_panel.SetScrollRate(5, 5)
- pod_bSizer = wx.BoxSizer(wx.VERTICAL)
-
- self.modulelist = ["cdn", "redis", "zookeeper", "kafka", "vod", "live"]
- self.creat_modules_button(pod_bSizer)
-
- self.pod_panel.SetSizer(pod_bSizer)
- self.pod_panel.Layout()
- pod_bSizer.Fit(self.pod_panel)
- config_bSizer.Add(self.pod_panel, 1, wx.ALL | wx.EXPAND, 5)
-
- self.arguments_panel = wx.Panel(
- self.config_panel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)
- arguments_bSizer = wx.BoxSizer(wx.VERTICAL)
-
- arguments_label = wx.StaticBoxSizer(wx.StaticBox(
- self.arguments_panel, wx.ID_ANY, u"cdn_config"), wx.VERTICAL)
- self.arguments_label = arguments_label
-
- self.pods_dict = {"cdn": {}, "redis": {},
- "zookeeper": {}, "kafka": {}, "vod": {}, "live": {}}
- for key, value in self.pods_dict.items():
- self.create_pod_panel(key, arguments_label)
-
- arguments_bSizer.Add(arguments_label, 1, wx.EXPAND, 5)
-
- self.arguments_panel.SetSizer(arguments_bSizer)
- self.arguments_panel.Layout()
- arguments_bSizer.Fit(self.arguments_panel)
- config_bSizer.Add(self.arguments_panel, 1, wx.EXPAND | wx.ALL, 5)
-
- self.config_panel.SetSizer(config_bSizer)
- self.config_panel.Layout()
- config_bSizer.Fit(self.config_panel)
- frame_bSizer.Add(self.config_panel, 1, wx.EXPAND | wx.ALL, 5)
-
- self.menu_panel = wx.Panel(
- self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)
- self.menu_panel.SetMaxSize(wx.Size(-1, 50))
-
- menu_bSizer = wx.BoxSizer(wx.VERTICAL)
-
- menu_sdbSizer = wx.StdDialogButtonSizer()
- self.menu_sdbSizerOK = wx.Button(self.menu_panel, wx.ID_OK)
- menu_sdbSizer.AddButton(self.menu_sdbSizerOK)
- self.menu_sdbSizerCancel = wx.Button(self.menu_panel, wx.ID_CANCEL)
- menu_sdbSizer.AddButton(self.menu_sdbSizerCancel)
- menu_sdbSizer.Realize()
-
- menu_bSizer.Add(menu_sdbSizer, 1, wx.ALIGN_CENTER_HORIZONTAL, 5)
-
- self.menu_panel.SetSizer(menu_bSizer)
- self.menu_panel.Layout()
- menu_bSizer.Fit(self.menu_panel)
- frame_bSizer.Add(self.menu_panel, 1, wx.ALL | wx.EXPAND, 5)
-
- self.log_panel = wx.Panel(
- self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)
- log_bSizer = wx.BoxSizer(wx.VERTICAL)
-
- self.log_textCtrl = wx.TextCtrl(
- self.log_panel, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_MULTILINE)
- log_bSizer.Add(self.log_textCtrl, 1, wx.ALL | wx.EXPAND, 5)
-
- self.log_panel.SetSizer(log_bSizer)
- self.log_panel.Layout()
- log_bSizer.Fit(self.log_panel)
- frame_bSizer.Add(self.log_panel, 1, wx.EXPAND | wx.ALL, 5)
-
- self.SetSizer(frame_bSizer)
- self.Layout()
-
- self.Centre(wx.VERTICAL)
-
- self.menu_sdbSizerCancel.Bind(
- wx.EVT_BUTTON, self.menu_sdbSizerOnCancelButtonClick)
- self.menu_sdbSizerOK.Bind(
- wx.EVT_BUTTON, self.menu_sdbSizerOnOKButtonClick)
-
- MyFrame_Config.show_pod_panel("cdn")(self, wx.EVT_BUTTON)
-
- def __del__(self):
- pass
-
- def OnClose(self, event):
- sys.exit(1)
-
- def creat_modules_button(self, pod_bSizer):
- for i in self.modulelist:
- setattr(self, i + "_button", wx.Button(self.pod_panel,
- wx.ID_ANY, i, wx.DefaultPosition, wx.DefaultSize, 0))
- pod_bSizer.Add(getattr(self, i + "_button"), 0, wx.ALL, 5)
-
- def create_pod_panel(self, podname, arguments_label):
- if podname == "live" or podname == "vod":
- self.creat_module_panel(arguments_label, podname)
- self.pods_dict[podname] = {
- 'node': None, 'cpu': None, 'memory': None}
- else:
- setattr(self, podname + "_panel", wx.ScrolledWindow(arguments_label.GetStaticBox(),
- wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.HSCROLL | wx.VSCROLL))
- getattr(self, podname + "_panel").SetScrollRate(5, 5)
- globals()[podname + "_bSizer"] = wx.BoxSizer(wx.VERTICAL)
- for j in ["node", "cpu", "memory"]:
- setattr(self, podname + "_" + j + "_panel", wx.Panel(getattr(self, podname +
- "_panel"), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL))
- globals()[podname + "_" + j +
- "_gSizer"] = wx.GridSizer(0, 2, 0, 0)
- setattr(self, podname + "_" + j + "_staticText", wx.StaticText(getattr(self,
- podname + "_" + j + "_panel"), wx.ID_ANY, j, wx.DefaultPosition, wx.DefaultSize, 0))
- getattr(self, podname + "_" + j + "_staticText").Wrap(-1)
- globals()[podname + "_" + j + "_gSizer"].Add(getattr(self,
- podname + "_" + j + "_staticText"), 0, wx.ALL, 5)
- globals()[podname + "_" + "node" + "_choiceChoices"] = [node_dict["nodename"]
- for node_dict in self.choice_list]
-
- globals()[podname + "_" + j + "_choiceChoices"] = [item["nodename"]
- for item in self.choice_list] if j == "node" else []
- setattr(self, podname + "_" + j + "_choice", wx.Choice(getattr(self, podname + "_" + j + "_panel"),
- wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, globals()[podname + "_" + j + "_choiceChoices"], 0))
- getattr(self, podname + "_" + j + "_choice").SetSelection(-1)
- globals()[podname + "_" + j + "_gSizer"].Add(getattr(self,
- podname + "_" + j + "_choice"), 0, wx.ALL, 5)
- getattr(self, podname + "_" + j + "_panel").SetSizer(globals()
- [podname + "_" + j + "_gSizer"])
- getattr(self, podname + "_" + j + "_panel").Layout()
- globals()[
- podname + "_" + j + "_gSizer"].Fit(getattr(self, podname + "_" + j + "_panel"))
- globals()[podname + "_bSizer"].Add(getattr(self,
- podname + "_" + j + "_panel"), 1, wx.EXPAND | wx.ALL, 5)
- self.pods_dict[podname][j] = None
-
- if re.search("[live|vod]\d", podname):
- self.choice_dict = {"input": self.video_list, "mode": {"sw": ["AVC", "HEVC", "AV1"], "hw": ["AVC", "HEVC"]}, "protocol": [
- "HLS", "DASH"], "resolution": ["856:480", "1280:720", "1920:1080", "2560:1440"], "bitrate": [str(i+5) for i in range(15)]}
- panel_list = ["mode", "input"] if re.search(
- "live\d", podname) else ["mode"]
-
- self.pods_dict[podname]["mode"] = None
- if re.search("live\d", podname):
- self.pods_dict[podname]['input'] = None
- for num in range(4):
- self.pods_dict[podname]['transcode' + str(num)] = {
- "codec": None, "protocol": None, "resolution": None, "bitrate": None, "output": None}
-
- for panel_name in panel_list:
- setattr(self, podname + "_" + panel_name + "_panel", wx.Panel(getattr(self, podname +
- "_panel"), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL))
- globals()[podname + "_" + panel_name +
- "_gSizer"] = wx.GridSizer(0, 2, 0, 0)
-
- setattr(self, podname + "_" + panel_name + "_staticText", wx.StaticText(getattr(self, podname +
- "_" + panel_name + "_panel"), wx.ID_ANY, panel_name, wx.DefaultPosition, wx.DefaultSize, 0))
- getattr(self, podname + "_" +
- panel_name + "_staticText").Wrap(-1)
-
- globals()[podname + "_" + panel_name + "_gSizer"].Add(getattr(self,
- podname + "_" + panel_name + "_staticText"), 0, wx.ALL, 5)
-
- globals()[podname + "_" + panel_name +
- "_choiceChoices"] = self.choice_dict[panel_name] if panel_name == "input" else ["SW", "HW"]
- setattr(self, podname + "_" + panel_name + "_choice", wx.Choice(getattr(self, podname + "_" + panel_name + "_panel"),
- wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, globals()[podname + "_" + panel_name + "_choiceChoices"], 0))
- getattr(self, podname + "_" + panel_name +
- "_choice").SetSelection(-1)
- globals()[podname + "_" + panel_name + "_gSizer"].Add(getattr(self,
- podname + "_" + panel_name + "_choice"), 0, wx.ALL, 5)
-
- getattr(self, podname + "_" + panel_name + "_panel").SetSizer(
- globals()[podname + "_" + panel_name + "_gSizer"])
- getattr(self, podname + "_" +
- panel_name + "_panel").Layout()
- globals()[podname + "_" + panel_name + "_gSizer"].Fit(
- getattr(self, podname + "_" + panel_name + "_panel"))
- globals()[podname + "_bSizer"].Add(getattr(self, podname +
- "_" + panel_name + "_panel"), 1, wx.EXPAND | wx.ALL, 5)
-
- if panel_name == "mode" and re.search("live\d", podname):
- setattr(MyFrame_Config, podname + "_mode_choiceOnChoice",
- MyFrame_Config.mode_choiceOnChoice(podname))
- getattr(self, podname + "_mode_choice").Bind(wx.EVT_CHOICE,
- getattr(self, podname + "_mode_choiceOnChoice"))
-
- if re.search("live\d", podname):
- for num in range(4):
- setattr(self, podname + "_transcode" + str(num) + "_panel", wx.Panel(getattr(
- self, podname + "_panel"), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL))
- globals()[podname + "_transcode" + str(num) +
- "_bSizer"] = wx.BoxSizer(wx.HORIZONTAL)
-
- for option in ["codec", "protocol", "resolution", "bitrate"]:
- if option == "codec":
- globals()["%s_transcode%d_%s_choiceChoices" % (
- podname, num, option)] = []
- else:
- globals()["%s_transcode%d_%s_choiceChoices" % (
- podname, num, option)] = self.choice_dict[option]
-
- setattr(self, podname + "_transcode" + str(num) + "_" + option + "_choice", wx.Choice(getattr(self, podname + "_transcode" + str(
- num) + "_panel"), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, globals()["%s_transcode%d_%s_choiceChoices" % (podname, num, option)], 0))
-
- getattr(self, "%s_transcode%d_%s_choice" %
- (podname, num, option)).SetSelection(-1)
- globals()[podname + "_transcode" + str(num) + "_bSizer"].Add(getattr(
- self, "%s_transcode%d_%s_choice" % (podname, num, option)), 0, wx.ALL, 5)
-
- setattr(self, podname + "_transcode" + str(num) + "_output_textCtrl", wx.TextCtrl(getattr(self, podname +
- "_transcode" + str(num) + "_panel"), wx.ID_ANY, "output_name", wx.DefaultPosition, wx.DefaultSize, 0))
- globals()[podname + "_transcode" + str(num) + "_bSizer"].Add(getattr(
- self, podname + "_transcode" + str(num) + "_output_textCtrl"), 0, wx.ALL, 5)
-
- getattr(self, podname + "_transcode" + str(num) + "_panel").SetSizer(
- globals()[podname + "_transcode" + str(num) + "_bSizer"])
- getattr(self, podname + "_transcode" +
- str(num) + "_panel").Layout()
- globals()[podname + "_transcode" + str(num) + "_bSizer"].Fit(
- getattr(self, podname + "_transcode" + str(num) + "_panel"))
- globals()[podname + "_bSizer"].Add(getattr(self, podname +
- "_transcode" + str(num) + "_panel"), 1, wx.EXPAND | wx.ALL, 5)
-
- globals()[podname + "_sdbSizer"] = wx.StdDialogButtonSizer()
- setattr(self, podname + "_sdbSizerSave",
- wx.Button(getattr(self, podname + "_panel"), wx.ID_SAVE))
- globals()[
- podname + "_sdbSizer"].AddButton(getattr(self, podname + "_sdbSizerSave"))
- setattr(self, podname + "_sdbSizerCancel",
- wx.Button(getattr(self, podname + "_panel"), wx.ID_CANCEL))
- globals()[podname + "_sdbSizer"].AddButton(getattr(self,
- podname + "_sdbSizerCancel"))
- globals()[podname + "_sdbSizer"].Realize()
- globals()[podname + "_bSizer"].Add(globals()
- [podname + "_sdbSizer"], 1, wx.EXPAND, 5)
-
- setattr(MyFrame_Config, podname + "_sdbSizerOnSaveButtonClick",
- MyFrame_Config.show_pod_panel(re.split("\d+", podname)[0]))
- getattr(self, podname + "_sdbSizerSave").Bind(wx.EVT_BUTTON,
- getattr(self, podname + "_sdbSizerOnSaveButtonClick"))
- setattr(MyFrame_Config, podname + "_sdbSizerOnCancelButtonClick",
- MyFrame_Config.cancel_pod_panel(podname))
- getattr(self, podname + "_sdbSizerCancel").Bind(wx.EVT_BUTTON,
- getattr(self, podname + "_sdbSizerOnCancelButtonClick"))
-
- setattr(MyFrame_Config, podname + "_node_choiceOnChoice",
- MyFrame_Config.node_choiceOnChoice(podname))
- getattr(self, podname + "_node_choice").Bind(wx.EVT_CHOICE,
- getattr(self, podname + "_node_choiceOnChoice"))
-
- getattr(self, podname + "_panel").SetSizer(globals()
- [podname + "_bSizer"])
- getattr(self, podname + "_panel").Layout()
- globals()[podname + "_bSizer"].Fit(getattr(self, podname + "_panel"))
- arguments_label.Add(
- getattr(self, podname + "_panel"), 1, wx.EXPAND | wx.ALL, 5)
- getattr(self, podname + "_panel").Hide()
-
- setattr(MyFrame_Config, podname + "_buttonOnButtonClick",
- MyFrame_Config.show_pod_panel(podname))
- getattr(self, podname + "_button").Bind(wx.EVT_BUTTON,
- getattr(self, podname + "_buttonOnButtonClick"))
-
- def loginfo(self):
- self.node_info = {}
- for key, value in self.pods_dict.items():
- if not (key == "live" or key == "vod"):
- getattr(self, key + "_button").SetBackgroundColour("#00FFFF")
- for i in value.keys():
- if i.find("transcode") == -1:
- value[i] = getattr(
- self, key + "_" + i + "_choice").GetStringSelection()
- else:
- globals()[key + i + "_isready"] = True
- for option in value[i].keys():
- if option == "output":
- value[i][option] = getattr(
- self, key + "_" + i + "_output_textCtrl").GetValue()
- else:
- value[i][option] = getattr(
- self, key + "_" + i + "_" + option + "_choice").GetStringSelection()
- if len(value[i][option]) == 0:
- globals()[key + i + "_isready"] = False
-
- if len(value[i]) == 0 or value[i] == "0":
- getattr(
- self, key + "_button").SetBackgroundColour("#FFFFFF")
-
- if re.search("live\d", key) and not (globals()[key + "transcode0_isready"] or globals()[key + "transcode1_isready"] or globals()[key + "transcode2_isready"] or globals()[key + "transcode3_isready"]):
- getattr(self, key + "_button").SetBackgroundColour("#FFFFFF")
-
- for node in self.choice_list:
- self.node_info[node["nodename"]] = {
- "modules": [], "cpu": 0, "memory": 0}
- for key, value in self.pods_dict.items():
- if value["node"] == node["nodename"] and getattr(self, key + "_button").GetBackgroundColour() == "#00FFFF":
- self.node_info[node["nodename"]]["modules"].append(key)
- self.node_info[node["nodename"]]["cpu"] += float(
- value["cpu"]) if len(value["cpu"]) > 0 else 0
- self.node_info[node["nodename"]]["memory"] += int(
- value["memory"]) if len(value["memory"]) > 0 else 0
-
- text_info = ""
- for item in self.choice_list:
- text_info += "Name: %s\nPods: %s\nCPU: capacity: %-10d used: %.1f\nMEMORY: capacity: %-8d used: %8d\n" % (item['nodename'], reduce(
- lambda x, y: x + ' ' + y, self.node_info[item['nodename']]['modules']) if len(self.node_info[item['nodename']]['modules']) else None, item['cpu'], self.node_info[item['nodename']]['cpu'], item['memory'], self.node_info[item['nodename']]['memory'])
- text_info += "ERROR cpu undercapacity \n" if item['cpu'] < self.node_info[item['nodename']]['cpu'] else ""
- text_info += "ERROR memory undercapacity \n" if item['memory'] < self.node_info[item['nodename']]['memory'] else ""
- text_info += "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n\n"
-
- self.log_textCtrl.AppendText(text_info)
-
-
- @staticmethod
- def node_choiceOnChoice(podname):
- def fun(self, event):
- getattr(self, podname + "_cpu_choice").SetItems([["0.5"] + [str(num) for num in range(1, node["cpu"])]
- for node in self.choice_list if node["nodename"] == getattr(self, podname + "_node_choice").GetStringSelection()][0])
- getattr(self, podname + "_memory_choice").SetItems([["500"] + [str(num) for num in range(1000, node["memory"], 500)]
- for node in self.choice_list if node["nodename"] == getattr(self, podname + "_node_choice").GetStringSelection()][0])
- return fun
-
- @staticmethod
- def mode_choiceOnChoice(podname):
- def fun(self, event):
- codec_list = ["AVC", "HEVC"] if getattr(
- self, podname + "_mode_choice").GetStringSelection() == "HW" else ["AVC", "HEVC", "AV1"]
- for num in range(4):
- getattr(self, "%s_transcode%d_codec_choice" %
- (podname, num)).SetItems(codec_list)
- return fun
-
- @staticmethod
- def cancel_pod_panel(podname):
- def fun(self, event):
- for i in ["node", "cpu", "memory", "mode", "input"]:
- getattr(self, podname + "_" + i + "_choice").SetSelection(-1)
- if re.search("live\d", podname):
- for num in range(4):
- for option in ["codec", "protocol", "resolution", "bitrate"]:
- getattr(self, "%s_transcode%d_%s_choice" %
- (podname, num, option)).SetSelection(-1)
- self.loginfo()
- return fun
-
- @staticmethod
- def show_pod_panel(podname):
- def fun(self, event):
- for key, value in self.pods_dict.items():
- try:
- getattr(self, key + "_panel").Hide()
- except:
- pass
- self.arguments_label.StaticBox.SetLabel(podname + "_config")
- getattr(self, podname + "_panel").Show()
- if re.search("[(vod)|(live)]\d", podname):
- self.setsize()
- self.loginfo()
- return fun
-
- def setsize(self):
- self.arguments_panel.SetSize(self.arguments_panel.GetSize(
- )[0] + (-1 if self.setsize_num % 2 else 1), self.arguments_panel.GetSize()[1])
- self.setsize_num += 1
-
- @staticmethod
- def creat_buttonOnButtonClick(modulename):
- def fun(self, event):
- setattr(self, modulename + str(getattr(self, modulename + "_num")) + "_button", wx.Button(getattr(self, modulename + "_scrolledWindow"),
- wx.ID_ANY, modulename + str(getattr(self, modulename + "_num")), (20, getattr(self, modulename + "_num") * 60), wx.DefaultSize, 0))
- getattr(self, modulename + "_list_wSizer").Add(getattr(self, modulename +
- str(getattr(self, modulename + "_num")) + "_button"), 0, wx.ALL, 5)
- self.pods_dict[modulename +
- str(getattr(self, modulename + "_num"))] = {}
- self.create_pod_panel(
- modulename + str(getattr(self, modulename + "_num")), self.arguments_label)
- setattr(self, modulename + "_num",
- getattr(self, modulename + "_num") + 1)
- self.setsize()
- return fun
-
- def creat_module_panel(self, arguments_label, modulename):
- setattr(self, modulename + "_panel", wx.ScrolledWindow(arguments_label.GetStaticBox(),
- wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.HSCROLL | wx.VSCROLL))
- getattr(self, modulename + "_panel").SetScrollRate(5, 5)
- globals()[modulename + "_bSizer"] = wx.BoxSizer(wx.VERTICAL)
-
- setattr(self, modulename + "_scrolledWindow", wx.ScrolledWindow(getattr(self, modulename +
- "_panel"), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.HSCROLL | wx.VSCROLL))
- getattr(self, modulename + "_scrolledWindow").SetScrollRate(5, 5)
- globals()[modulename + "_list_wSizer"] = wx.WrapSizer(wx.HORIZONTAL,
- wx.WRAPSIZER_DEFAULT_FLAGS)
- setattr(self, modulename + "_list_wSizer",
- globals()[modulename + "_list_wSizer"])
-
- getattr(self, modulename + "_scrolledWindow").SetSizer(globals()
- [modulename + "_list_wSizer"])
- getattr(self, modulename + "_scrolledWindow").Layout()
- globals()[modulename + "_list_wSizer"].Fit(getattr(self,
- modulename + "_scrolledWindow"))
- globals()[modulename + "_bSizer"].Add(getattr(self,
- modulename + "_scrolledWindow"), 1, wx.EXPAND | wx.ALL, 5)
-
- setattr(self, modulename + "_creat_button", wx.Button(getattr(self, modulename +
- "_panel"), wx.ID_ANY, u"creat", wx.DefaultPosition, wx.DefaultSize, 0))
- globals()[modulename + "_bSizer"].Add(getattr(self,
- modulename + "_creat_button"), 0, wx.ALL, 5)
- setattr(self, modulename + "_bSizer",
- globals()[modulename + "_bSizer"])
-
- getattr(self, modulename + "_panel").SetSizer(globals()
- [modulename + "_bSizer"])
- getattr(self, modulename + "_panel").Layout()
- globals()[modulename + "_bSizer"].Fit(getattr(self, modulename + "_panel"))
- arguments_label.Add(
- getattr(self, modulename + "_panel"), 1, wx.EXPAND | wx.ALL, 5)
-
- setattr(MyFrame_Config, modulename + "_creat_buttonOnButtonClick",
- MyFrame_Config.creat_buttonOnButtonClick(modulename))
- getattr(self, modulename + "_creat_button").Bind(wx.EVT_BUTTON,
- getattr(self, modulename + "_creat_buttonOnButtonClick"))
-
- def menu_sdbSizerOnCancelButtonClick(self, event):
- sys.exit(1)
-
- def menu_sdbSizerOnOKButtonClick(self, event):
- self.loginfo()
- pods = []
- for key, value in self.node_info.items():
- for pod in value["modules"]:
- if getattr(self, pod + "_button").GetBackgroundColour() == (0, 255, 255, 255):
- pods.extend(value["modules"])
- pods = list(set(pods))
-
- for module in ["cdn", "redis", "zookeeper", "kafka"]:
- if module not in pods:
- self.log_textCtrl.AppendText(module + " not config\n")
- return
-
- update_yaml.update_yaml(nfs_server=self.nfs_server, volume_directory=self.volume_directory, dir_path = sys.argv[1],
- pods=pods, pods_dict=self.pods_dict, node_dict=self.node_dict)
- self.Destroy()
diff --git a/deployment/kubernetes/frame_index.py b/deployment/kubernetes/frame_index.py
deleted file mode 100755
index 501ecb1..0000000
--- a/deployment/kubernetes/frame_index.py
+++ /dev/null
@@ -1,143 +0,0 @@
-#!/usr/bin/python3
-
-import wx
-import wx.xrc
-import re
-import os
-import sys
-import functools
-from frame_config import MyFrame_Config
-
-class MyFrame_Index (wx.Frame):
-
- def __init__(self, parent):
- self.errwin = wx.MessageDialog(
- parent=None,
- message=u"Invalid parameter, Please input a integer ...",
- caption=u"ERROR",
- style=wx.OK)
-
- wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=u"NFS Server Configuration",
- pos=wx.DefaultPosition, size=wx.Size(500, 300), style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)
- self.Bind(wx.EVT_CLOSE, self.OnClose)
-
- self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)
-
- gSizer1 = wx.GridSizer(0, 2, 0, 0)
-
- self.m_staticText1 = wx.StaticText(
- self, wx.ID_ANY, u"IP address:", wx.DefaultPosition, wx.DefaultSize, 0)
- self.m_staticText1.Wrap(-1)
-
- gSizer1.Add(self.m_staticText1, 0, wx.ALIGN_CENTER_HORIZONTAL |
- wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
-
- self.m_textCtrl1 = wx.TextCtrl(
- self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0)
- gSizer1.Add(self.m_textCtrl1, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
-
- self.m_staticText3 = wx.StaticText(
- self, wx.ID_ANY, u"Username:", wx.DefaultPosition, wx.DefaultSize, 0)
- self.m_staticText3.Wrap(-1)
-
- gSizer1.Add(self.m_staticText3, 0, wx.ALIGN_CENTER_HORIZONTAL |
- wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
-
- self.m_textCtrl2 = wx.TextCtrl(
- self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0)
- gSizer1.Add(self.m_textCtrl2, 0, wx.ALL, 5)
-
- self.m_staticText4 = wx.StaticText(
- self, wx.ID_ANY, u"Password:", wx.DefaultPosition, wx.DefaultSize, 0)
- self.m_staticText4.Wrap(-1)
-
- gSizer1.Add(self.m_staticText4, 0, wx.ALIGN_CENTER_HORIZONTAL |
- wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
-
- self.m_textCtrl3 = wx.TextCtrl(
- self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_PASSWORD)
- gSizer1.Add(self.m_textCtrl3, 0, wx.ALL, 5)
-
- self.m_staticText5 = wx.StaticText(
- self, wx.ID_ANY, u"Project directory path:", wx.DefaultPosition, wx.DefaultSize, 0)
- self.m_staticText5.Wrap(-1)
-
- gSizer1.Add(self.m_staticText5, 0, wx.ALIGN_CENTER_HORIZONTAL |
- wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
-
- self.m_textCtrl4 = wx.TextCtrl(
- self, wx.ID_ANY, wx.EmptyString, wx.Point(-1, -1), wx.DefaultSize, 0)
- gSizer1.Add(self.m_textCtrl4, 0, wx.ALL, 5)
-
- self.m_button1 = wx.Button(
- self, wx.ID_ANY, u"cancel", wx.DefaultPosition, wx.DefaultSize, 0)
-
- self.Bind(wx.EVT_BUTTON, self.cancelEvent, self.m_button1)
- gSizer1.Add(self.m_button1, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
-
- self.m_button2 = wx.Button(
- self, wx.ID_ANY, u"save", wx.DefaultPosition, wx.DefaultSize, 0)
- self.Bind(wx.EVT_BUTTON, self.sureEvent, self.m_button2)
- gSizer1.Add(self.m_button2, 0, wx.ALL, 5)
-
- self.SetSizer(gSizer1)
- self.Layout()
-
- self.Centre(wx.BOTH)
-
- def __del__(self):
- pass
-
- def OnClose(self, event):
- sys.exit(1)
-
- def sureEvent(self, event):
- self.nfs_server = self.m_textCtrl1.GetValue()
- self.username = self.m_textCtrl2.GetValue()
- self.password = self.m_textCtrl3.GetValue()
- self.volume_directory = self.m_textCtrl4.GetValue()
-
- check_info = self.check_info()
- if not check_info == "OK":
- self.errwin.SetMessage(check_info)
- self.errwin.ShowModal()
- return
-
- try:
- exec_cmd = os.popen("fab -u %s -p %s -H %s -- 'ls %s'" % (self.username, self.password,
- self.nfs_server, os.path.join(self.volume_directory, "volume/video/archive")))
- result = [re.findall(r'[^\\\s/:\*\?"<>\|]+', i)
- for i in re.findall(r'out:(.+)\n', exec_cmd.read())]
- video_list = [i for i in functools.reduce(
- lambda x, y:x+y, result) if os.path.splitext(i)[1] == '.mp4']
- except:
- self.errwin.SetMessage("connect error")
- self.errwin.ShowModal()
- return
-
- if len(video_list) == 0:
- self.errwin.SetMessage("no video")
- self.errwin.ShowModal()
- return
-
- self.Destroy()
- frame_config = MyFrame_Config(None, nfs_server=self.nfs_server,
- volume_directory=self.volume_directory, video_list=video_list)
- frame_config.Show(True)
-
- def cancelEvent(self, event):
- sys.exit(1)
-
- def check_info(self):
- if not re.match("((25[0-5]|2[0-4]\d|((1\d{2})|([1-9]?\d)))\.){3}(25[0-5]|2[0-4]\d|((1\d{2})|([1-9]?\d)))$", self.nfs_server):
- return "ip is error"
- for info in ["username", "password", "volume_directory"]:
- if not re.search("^([\w_\-\&/]+)+$", getattr(self, info)):
- return info + " error"
-
- if not os.path.isabs(self.volume_directory):
- return "not abs"
- elif re.match(".+/$", self.volume_directory):
- self.volume_directory = self.volume_directory[:-1]
-
- return "OK"
diff --git a/deployment/kubernetes/helm/.gitignore b/deployment/kubernetes/helm/.gitignore
new file mode 100644
index 0000000..d237d92
--- /dev/null
+++ b/deployment/kubernetes/helm/.gitignore
@@ -0,0 +1,2 @@
+cdn-transcode/values.yaml
+*-pv.yaml
diff --git a/deployment/kubernetes/helm/CMakeLists.txt b/deployment/kubernetes/helm/CMakeLists.txt
new file mode 100644
index 0000000..966a884
--- /dev/null
+++ b/deployment/kubernetes/helm/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(service "helm")
+include("${CMAKE_SOURCE_DIR}/script/service.cmake")
+include("${CMAKE_SOURCE_DIR}/script/deployment.cmake")
+add_dependencies(build_${service} build_pv)
diff --git a/deployment/kubernetes/helm/build.sh b/deployment/kubernetes/helm/build.sh
new file mode 100755
index 0000000..fd3625b
--- /dev/null
+++ b/deployment/kubernetes/helm/build.sh
@@ -0,0 +1,17 @@
+#!/bin/bash -e
+
+DIR=$(dirname $(readlink -f "$0"))
+NVODS="${1:-1}"
+NLIVES="${2:-1}"
+SCENARIO="${3:-transcode}"
+PLATFORM="${4:-Xeon}"
+REGISTRY="$5"
+HOSTIP=$(ip route get 8.8.8.8 | awk '/ src /{split(substr($0,index($0," src ")),f);print f[2];exit}')
+
+# make sure helm is functional
+helm version >/dev/null 2>/dev/null || exit 0
+
+echo "Generating helm chart"
+. "${DIR}/../volume-info.sh"
+m4 -DREGISTRY_PREFIX=${REGISTRY} -DNVODS=${NVODS} -DNLIVES=${NLIVES} -DSCENARIO=${SCENARIO} -DPLATFORM=${PLATFORM} -DUSERID=$(id -u) -DGROUPID=$(id -g) -DHOSTIP=${HOSTIP} $(env | grep _VOLUME_ | sed 's/^/-D/') -I "${DIR}/cdn-transcode" "$DIR/cdn-transcode/values.yaml.m4" > "$DIR/cdn-transcode/values.yaml"
+
diff --git a/deployment/kubernetes/helm/cdn-transcode/Chart.yaml b/deployment/kubernetes/helm/cdn-transcode/Chart.yaml
new file mode 100644
index 0000000..0be8f4a
--- /dev/null
+++ b/deployment/kubernetes/helm/cdn-transcode/Chart.yaml
@@ -0,0 +1,10 @@
+apiVersion: v2
+appVersion: 0.1.0
+description: A Helm chart for the CDN Transcode sample
+home: https://github.com/OpenVisualCloud/CDN-Transcode-Sample
+icon: https://raw.githubusercontent.com/OpenVisualCloud/CDN-Transcode-Sample/master/volume/html/favicon.ico
+name: cdn-transcode-sample
+sources:
+- https://github.com/OpenVisualCloud/CDN-Transcode-Sample
+type: application
+version: 0.1.0
diff --git a/deployment/kubernetes/helm/cdn-transcode/README.md b/deployment/kubernetes/helm/cdn-transcode/README.md
new file mode 100644
index 0000000..2767794
--- /dev/null
+++ b/deployment/kubernetes/helm/cdn-transcode/README.md
@@ -0,0 +1,39 @@
+
+The CDN Transcode Sample is an Open Visual Cloud software stack with all required open source ingredients well integrated to provide out-of-box simple transcode or CDN media transcode service, including live streaming and video on demand. It also provides docker-based media delivery software development environment upon which developer can easily build their specific applications.
+
+### Prerequisites:
+
+The Sample assumes that you have a ready-to-use Kubernetes cluster environment with `helm` to manage the application deployment.
+
+### Build:
+
+```bash
+mkdir build
+cd build
+cmake ..
+make
+```
+
+---
+
+If you deploy the sample to a cluster, please configure the sample, as `cmake -DREGISTRY= ..`, to push the sample images to the private docker registry after each build.
+
+---
+
+### Create Shared Volumes:
+
+```bash
+make volume
+```
+
+The `make volume` command creates local persistent volumes under the /tmp directory of the first two Kubernetes workers. This is a temporary solution for quick sample deployment. For scalability beyond a two-node cluster, consider rewriting the `mkvolume.sh` script.
+
+`make volume` uses `scp` to copy volumes to the Kubernetes workers, assuming that the Kubernetes master has password-less access to the Kubernetes workers.
+
+### Start/Stop Sample:
+
+```bash
+make start_helm
+make stop_helm
+```
+
diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/cdn.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/cdn.yaml
new file mode 100644
index 0000000..d33954e
--- /dev/null
+++ b/deployment/kubernetes/helm/cdn-transcode/templates/cdn.yaml
@@ -0,0 +1,73 @@
+
+{{- if eq "cdn" $.Values.scenario }}
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: cdn-service
+ name: cdn-service
+spec:
+ ports:
+ - name: "443"
+ port: 443
+ targetPort: 8443
+ - name: "1935"
+ port: 1935
+ targetPort: 1935
+ externalIPs:
+ - "{{ .Values.cdn.hostIP }}"
+ selector:
+ app: cdn-service
+
+---
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: cdn-service
+ name: cdn-service
+spec:
+ selector:
+ matchLabels:
+ app: cdn-service
+ replicas: 1
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ app: cdn-service
+ spec:
+ containers:
+ - args:
+ - bash
+ - -c
+ - /home/main.py&/usr/local/sbin/nginx
+ image: {{ $.Values.registryPrefix }}tc_{{ $.Values.scenario }}_service:latest
+ imagePullPolicy: IfNotPresent
+ name: cdn-service
+ ports:
+ - containerPort: 8443
+ - containerPort: 1935
+ volumeMounts:
+ - mountPath: /var/run/secrets
+ name: secrets
+ readOnly: true
+ - mountPath: /var/www/archive
+ name: archive
+ - mountPath: /var/www/video
+ name: cache
+ volumes:
+ - name: secrets
+ secret:
+ secretName: self-signed-certificate
+ - name: archive
+ persistentVolumeClaim:
+ claimName: video-archive
+ - name: cache
+ persistentVolumeClaim:
+ claimName: video-cache
+ restartPolicy: Always
+
+{{- end }}
diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/kafka.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/kafka.yaml
new file mode 100644
index 0000000..23cc6bb
--- /dev/null
+++ b/deployment/kubernetes/helm/cdn-transcode/templates/kafka.yaml
@@ -0,0 +1,76 @@
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: kafka-service
+ name: kafka-service
+spec:
+ ports:
+ - name: "9092"
+ port: 9092
+ targetPort: 9092
+ selector:
+ app: kafka-service
+
+---
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: kafka-service
+ name: kafka-service
+spec:
+ selector:
+ matchLabels:
+ app: kafka-service
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: kafka-service
+ spec:
+ containers:
+ - env:
+ - name: KAFKA_ADVERTISED_HOST_NAME
+ value: kafka-service
+ - name: KAFKA_ADVERTISED_LISTENERS
+ value: PLAINTEXT://kafka-service:9092
+ - name: KAFKA_ADVERTISED_PORT
+ value: "9092"
+ - name: KAFKA_AUTO_CREATE_TOPICS_ENABLE
+ value: "true"
+ - name: KAFKA_BROKER_ID
+ value: "1"
+ - name: KAFKA_CREATE_TOPICS
+ value: content_provider_sched:16:1
+ - name: KAFKA_DEFAULT_REPLICATION_FACTOR
+ value: "1"
+ - name: KAFKA_HEAP_OPTS
+ value: -Xmx{{ .Values.kafka.heapSize }} -Xms{{ .Values.kafka.heapSize }}
+ - name: KAFKA_INTER_BROKER_LISTENER_NAME
+ value: PLAINTEXT
+ - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP
+ value: PLAINTEXT:PLAINTEXT
+ - name: KAFKA_LOG4J_LOGGERS
+ value: kafka=ERROR,kafka.controller=ERROR,state.change.logger=ERROR,org.apache.kafka=ERROR
+ - name: KAFKA_LOG4J_ROOT_LOGLEVEL
+ value: ERROR
+ - name: KAFKA_LOG_RETENTION_HOURS
+ value: "8"
+ - name: KAFKA_NUM_PARTITIONS
+ value: "16"
+ - name: KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR
+ value: "1"
+ - name: KAFKA_ZOOKEEPER_CONNECT
+ value: zookeeper-service:2181
+ image: {{ $.Values.registryPrefix }}tc_kafka_service:latest
+ imagePullPolicy: IfNotPresent
+ name: kafka-service
+ ports:
+ - containerPort: 9092
+ securityContext:
+ runAsUser: 1000
+ restartPolicy: Always
+
diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/live.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/live.yaml
new file mode 100644
index 0000000..71edb0e
--- /dev/null
+++ b/deployment/kubernetes/helm/cdn-transcode/templates/live.yaml
@@ -0,0 +1,73 @@
+
+{{- if eq "cdn" $.Values.scenario }}
+
+{{- range $i,$v1 := .Values.liveTranscode.streams }}
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: live-service-{{ $i }}
+ name: live-service-{{ $i }}
+spec:
+ selector:
+ matchLabels:
+ app: live-service-{{ $i }}
+ replicas: {{if lt (int $i) (int $.Values.liveTranscode.replicas)}}1{{else}}0{{end}}
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ app: live-service-{{ $i }}
+ spec:
+ containers:
+ - image: {{ $.Values.registryPrefix }}tc_xcode_{{ lower $.Values.platform }}:latest
+ imagePullPolicy: IfNotPresent
+ command: ["/usr/local/bin/ffmpeg","-re","-stream_loop","-1",
+ "-i","{{ .name }}",
+{{- range $k,$v2 := .transcode }}
+ "-vf","scale={{ .scale }}",
+ "-c:v","{{ .encoderType }}",
+ "-b:v","{{ .bitrate }}",
+ "-r","{{ .framerate }}",
+ "-g","{{ .gop }}",
+ "-bf","{{ .maxbframes }}",
+ "-refs","{{ .refsNum }}",
+ "-preset","{{ .preset }}",
+ "-forced-idr","1",
+{{- if eq ( hasPrefix "libsvt" .encoderType ) true }}
+ "-thread_count","96",
+{{- end }}
+ "-an",
+ "-f","flv","rtmp://cdn-service/{{ .protocol }}/media_{{ $i }}_{{ $k }}",
+{{- end }}
+ "-abr_pipeline"]
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - rm
+ - -rf
+{{- range $k,$v2 := .transcode }}
+ - '/var/www/video/{{ .protocol }}/media_{{ $i }}_{{ $k }}'
+{{- end }}
+ name: live-service-{{ $i }}
+ env:
+ - name: NO_PROXY
+ value: "cdn-service"
+ - name: no_proxy
+ value: "cdn-service"
+ volumeMounts:
+ - mountPath: /var/www/archive
+ name: archive
+ readOnly: true
+ volumes:
+ - name: archive
+ persistentVolumeClaim:
+ claimName: video-archive
+ restartPolicy: Always
+
+---
+{{- end }}
+{{- end }}
+
diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/redis.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/redis.yaml
new file mode 100644
index 0000000..43ec5f9
--- /dev/null
+++ b/deployment/kubernetes/helm/cdn-transcode/templates/redis.yaml
@@ -0,0 +1,48 @@
+{{- if eq "cdn" $.Values.scenario }}
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: redis-service
+ name: redis-service
+spec:
+ ports:
+ - name: "6379"
+ port: 6379
+ targetPort: 6379
+ selector:
+ app: redis-service
+
+---
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: redis-service
+ name: redis-service
+spec:
+ selector:
+ matchLabels:
+ app: redis-service
+ replicas: 1
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ app: redis-service
+ spec:
+ containers:
+ - args:
+ - redis-server
+ image: redis:latest
+ imagePullPolicy: IfNotPresent
+ name: redis-service
+ ports:
+ - containerPort: 6379
+ securityContext:
+ runAsUser: 999
+ restartPolicy: Always
+
+{{- end }}
diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/video-archive-pvc.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/video-archive-pvc.yaml
new file mode 100644
index 0000000..9ab2c9e
--- /dev/null
+++ b/deployment/kubernetes/helm/cdn-transcode/templates/video-archive-pvc.yaml
@@ -0,0 +1,13 @@
+
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: video-archive
+spec:
+ accessModes:
+ - ReadWriteMany
+ storageClassName: video-archive
+ resources:
+ requests:
+ storage: "{{ .Values.volume.video.archive.size }}"
+
diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/video-cache-pvc.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/video-cache-pvc.yaml
new file mode 100644
index 0000000..2c34039
--- /dev/null
+++ b/deployment/kubernetes/helm/cdn-transcode/templates/video-cache-pvc.yaml
@@ -0,0 +1,13 @@
+
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: video-cache
+spec:
+ accessModes:
+ - ReadWriteMany
+ storageClassName: video-cache
+ resources:
+ requests:
+ storage: "{{ .Values.volume.video.cache.size }}"
+
diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/xcode.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/xcode.yaml
new file mode 100644
index 0000000..2cd3f43
--- /dev/null
+++ b/deployment/kubernetes/helm/cdn-transcode/templates/xcode.yaml
@@ -0,0 +1,104 @@
+
+{{- range $deviceIdx := until ( int ( $.Values.hwDeviceNum ) ) }}
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: xcode-service-{{ $deviceIdx }}
+ name: xcode-service-{{ $deviceIdx }}
+spec:
+ selector:
+ matchLabels:
+ app: xcode-service-{{ $deviceIdx }}
+ replicas: {{ $.Values.vodTranscode.replicas }}
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ app: xcode-service-{{ $deviceIdx }}
+ spec:
+ containers:
+ - args:
+ - bash
+ - -c
+ - /home/main.py
+ image: {{ $.Values.registryPrefix }}tc_xcode_{{ lower $.Values.platform }}:latest
+ imagePullPolicy: IfNotPresent
+ name: xcode-service-{{ $deviceIdx }}
+ env:
+{{- if ne $.Values.platform "Xeon" }}
+ - name: HW_ACC_TYPE
+ value: {{ $.Values.hwAccType }}
+ - name: HW_DEVICE
+ value: /dev/dri/renderD{{ add $deviceIdx 128 }}
+{{- end }}
+ - name: SCENARIO
+ value: {{ $.Values.scenario | quote }}
+ - name: NO_PROXY
+ value: "*"
+ - name: no_proxy
+ value: "*"
+ volumeMounts:
+ - mountPath: /var/www/archive
+ name: archive
+ readOnly: true
+ - mountPath: /var/www/video
+ name: cache
+{{- if ne $.Values.platform "Xeon" }}
+# resources:
+# limits:
+# gpu.intel.com/i915: 1
+ securityContext:
+ privileged: true
+ runAsUser: 0
+{{- end }}
+ volumes:
+ - name: archive
+ persistentVolumeClaim:
+ claimName: video-archive
+ - name: cache
+ persistentVolumeClaim:
+ claimName: video-cache
+ restartPolicy: Always
+
+---
+{{- end }}
+
+
+{{- if ne "cdn" $.Values.scenario }}
+---
+
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: benchmark
+spec:
+ template:
+ spec:
+ enableServiceLinks: false
+ containers:
+ - name: benchmark
+ image: {{ $.Values.registryPrefix }}tc_benchmark_service:latest
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: NO_PROXY
+ value: "*"
+ - name: no_proxy
+ value: "*"
+ volumeMounts:
+ - mountPath: /var/www/archive
+ name: video-archive
+ readOnly: true
+ - mountPath: /var/www/video
+ name: video-cache
+ volumes:
+ - name: video-archive
+ persistentVolumeClaim:
+ claimName: video-archive
+ - name: video-cache
+ persistentVolumeClaim:
+ claimName: video-cache
+ restartPolicy: Never
+
+{{- end }}
diff --git a/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper.yaml b/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper.yaml
new file mode 100644
index 0000000..307f370
--- /dev/null
+++ b/deployment/kubernetes/helm/cdn-transcode/templates/zookeeper.yaml
@@ -0,0 +1,58 @@
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: zookeeper-service
+ name: zookeeper-service
+spec:
+ ports:
+ - name: "2181"
+ port: 2181
+ targetPort: 2181
+ selector:
+ app: zookeeper-service
+
+---
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: zookeeper-service
+ name: zookeeper-service
+spec:
+ selector:
+ matchLabels:
+ app: zookeeper-service
+ replicas: 1
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ app: zookeeper-service
+ spec:
+ containers:
+ - env:
+ - name: ZOOKEEPER_CLIENT_PORT
+ value: "2181"
+ - name: ZOOKEEPER_HEAP_OPTS
+ value: -Xmx{{ $.Values.zookeeper.heapSize }} -Xms{{ $.Values.zookeeper.heapSize }}
+ - name: ZOOKEEPER_LOG4J_LOGGERS
+ value: zookeeper=ERROR
+ - name: ZOOKEEPER_LOG4J_ROOT_LOGLEVEL
+ value: ERROR
+ - name: ZOOKEEPER_MAX_CLIENT_CNXNS
+ value: "20000"
+ - name: ZOOKEEPER_SERVER_ID
+ value: "1"
+ - name: ZOOKEEPER_TICK_TIME
+ value: "2000"
+ image: zookeeper:3.5.6
+ imagePullPolicy: IfNotPresent
+ name: zookeeper-service
+ ports:
+ - containerPort: 2181
+ securityContext:
+ runAsUser: 1000
+ restartPolicy: Always
diff --git a/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4 b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4
new file mode 100644
index 0000000..84963bf
--- /dev/null
+++ b/deployment/kubernetes/helm/cdn-transcode/values.yaml.m4
@@ -0,0 +1,58 @@
+
+registryPrefix: "defn(`REGISTRY_PREFIX')"
+
+# platform specifies the target platform: Xeon or XeonE3.
+platform: "defn(`PLATFORM')"
+
+# transcoding with HW QSV or VAAPI: qsv or vaapi.
+hwAccType: "qsv"
+hwDeviceNum: ifelse(defn(`PLATFORM'),`SG1',4,1)
+
+# scenario specifies the mode: cdn or batch.
+scenario: "defn(`SCENARIO')"
+
+zookeeper:
+ heapSize: 1024m
+
+kafka:
+ heapSize: 1024m
+
+liveTranscode:
+ replicas: defn(`NLIVES')
+ streams:
+ - name: "/var/www/archive/3115738.mp4"
+ transcode:
+ - protocol: dash
+ scale: "856:480"
+ bitrate: "8000000"
+ framerate: 25
+ gop: 100
+ maxbframes: 2
+ refsNum: 2
+ preset: veryfast
+ encoderType: libx264
+ - name: "/var/www/archive/3115738.mp4"
+ transcode:
+ - protocol: hls
+ scale: "856:480"
+ bitrate: "8000000"
+ framerate: 25
+ gop: 100
+ maxbframes: 2
+ refsNum: 2
+ preset: 9
+ encoderType: libsvt_hevc
+
+vodTranscode:
+ replicas: defn(`NVODS')
+
+cdn:
+ hostIP: defn(`HOSTIP')
+
+volume:
+ video:
+ archive:
+ size: defn(`VIDEO_ARCHIVE_VOLUME_SIZE')
+ cache:
+ size: defn(`VIDEO_CACHE_VOLUME_SIZE')
+
diff --git a/deployment/kubernetes/helm/start.sh b/deployment/kubernetes/helm/start.sh
new file mode 100755
index 0000000..979796d
--- /dev/null
+++ b/deployment/kubernetes/helm/start.sh
@@ -0,0 +1,16 @@
+#!/bin/bash -e
+
+DIR=$(dirname $(readlink -f "$0"))
+
+function create_secret {
+ kubectl create secret generic self-signed-certificate "--from-file=${DIR}/../../certificate/self.crt" "--from-file=${DIR}/../../certificate/self.key"
+}
+
+# create secrets
+"$DIR/../../certificate/self-sign.sh"
+create_secret 2>/dev/null || (kubectl delete secret self-signed-certificate; create_secret)
+
+for yaml in $(find "$DIR/.." -maxdepth 1 -name "*-pv.yaml" -print); do
+ kubectl apply -f "$yaml"
+done
+helm install cdn-transcode "$DIR/cdn-transcode"
diff --git a/deployment/kubernetes/helm/stop.sh b/deployment/kubernetes/helm/stop.sh
new file mode 100755
index 0000000..a824f88
--- /dev/null
+++ b/deployment/kubernetes/helm/stop.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+DIR=$(dirname $(readlink -f "$0"))
+
+helm uninstall cdn-transcode
+
+# delete pvs and scs
+for yaml in $(find "${DIR}/.." -maxdepth 1 -name "*-pv.yaml" -print); do
+ kubectl delete --wait=false -f "$yaml" --ignore-not-found=true 2>/dev/null
+done
+
+kubectl delete secret self-signed-certificate 2> /dev/null || echo -n ""
diff --git a/deployment/kubernetes/logging/elasticsearch/es-statefulSet.yaml b/deployment/kubernetes/logging/elasticsearch/es-statefulSet.yaml
deleted file mode 100644
index ba32a53..0000000
--- a/deployment/kubernetes/logging/elasticsearch/es-statefulSet.yaml
+++ /dev/null
@@ -1,109 +0,0 @@
-# RBAC authn and authz
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: elasticsearch-logging
- namespace: kube-system
- labels:
- k8s-app: elasticsearch-logging
- addonmanager.kubernetes.io/mode: Reconcile
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: elasticsearch-logging
- labels:
- k8s-app: elasticsearch-logging
- addonmanager.kubernetes.io/mode: Reconcile
-rules:
-- apiGroups:
- - ""
- resources:
- - "services"
- - "namespaces"
- - "endpoints"
- verbs:
- - "get"
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- namespace: kube-system
- name: elasticsearch-logging
- labels:
- k8s-app: elasticsearch-logging
- addonmanager.kubernetes.io/mode: Reconcile
-subjects:
-- kind: ServiceAccount
- name: elasticsearch-logging
- namespace: kube-system
- apiGroup: ""
-roleRef:
- kind: ClusterRole
- name: elasticsearch-logging
- apiGroup: ""
----
-# Elasticsearch deployment itself
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
- name: elasticsearch-logging
- namespace: kube-system
- labels:
- k8s-app: elasticsearch-logging
- addonmanager.kubernetes.io/mode: Reconcile
-spec:
- serviceName: elasticsearch-logging
- replicas: 1
- selector:
- matchLabels:
- k8s-app: elasticsearch-logging
- template:
- metadata:
- labels:
- k8s-app: elasticsearch-logging
- spec:
- serviceAccountName: elasticsearch-logging
- containers:
- - image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0
- name: elasticsearch-logging
- resources:
- limits:
- cpu: 1000m
- memory: 3Gi
- requests:
- cpu: 100m
- memory: 3Gi
- ports:
- - containerPort: 9200
- name: db
- protocol: TCP
- - containerPort: 9300
- name: transport
- protocol: TCP
- volumeMounts:
- - name: elasticsearch-logging
- mountPath: /data
- env:
- - name: node.name
- value: node_name
- - name: cluster.initial_master_nodes
- value: node_name
- - name: "NAMESPACE"
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- volumes:
- - name: elasticsearch-logging
- emptyDir: {}
- nodeSelector:
- node-role.kubernetes.io/master: ""
- tolerations:
- - key: "node-role.kubernetes.io/master"
- effect: "NoSchedule"
- initContainers:
- - image: alpine:3.6
- command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
- name: elasticsearch-logging-init
- securityContext:
- privileged: true
diff --git a/deployment/kubernetes/logging/elasticsearch/es-svc.yaml b/deployment/kubernetes/logging/elasticsearch/es-svc.yaml
deleted file mode 100644
index 7f3a2f6..0000000
--- a/deployment/kubernetes/logging/elasticsearch/es-svc.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: elasticsearch-logging
- namespace: kube-system
- labels:
- k8s-app: elasticsearch-logging
- kubernetes.io/cluster-service: "true"
- addonmanager.kubernetes.io/mode: Reconcile
- kubernetes.io/name: "Elasticsearch"
-spec:
- ports:
- - port: 9200
- protocol: TCP
- targetPort: 9200
- nodePort: 9200
- selector:
- k8s-app: elasticsearch-logging
- type: NodePort
diff --git a/deployment/kubernetes/logging/fluentd/fluentd-es-configMap.yaml b/deployment/kubernetes/logging/fluentd/fluentd-es-configMap.yaml
deleted file mode 100644
index 5d19f83..0000000
--- a/deployment/kubernetes/logging/fluentd/fluentd-es-configMap.yaml
+++ /dev/null
@@ -1,449 +0,0 @@
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: fluentd-es-config
- namespace: kube-system
- labels:
- addonmanager.kubernetes.io/mode: Reconcile
-data:
- system.conf: |-
-
- root_dir /tmp/fluentd-buffers/
-
- containers.input.conf: |-
- # This configuration file for Fluentd / td-agent is used
- # to watch changes to Docker log files. The kubelet creates symlinks that
- # capture the pod name, namespace, container name & Docker container ID
- # to the docker logs for pods in the /var/log/containers directory on the host.
- # If running this fluentd configuration in a Docker container, the /var/log
- # directory should be mounted in the container.
- #
- # These logs are then submitted to Elasticsearch which assumes the
- # installation of the fluent-plugin-elasticsearch & the
- # fluent-plugin-kubernetes_metadata_filter plugins.
- # See https://github.com/uken/fluent-plugin-elasticsearch &
- # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
- # more information about the plugins.
- #
- # Example
- # =======
- # A line in the Docker log file might look like this JSON:
- #
- # {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
- # "stream":"stderr",
- # "time":"2014-09-25T21:15:03.499185026Z"}
- #
- # The time_format specification below makes sure we properly
- # parse the time format produced by Docker. This will be
- # submitted to Elasticsearch and should appear like:
- # $ curl 'http://elasticsearch-logging:9200/_search?pretty'
- # ...
- # {
- # "_index" : "logstash-2014.09.25",
- # "_type" : "fluentd",
- # "_id" : "VBrbor2QTuGpsQyTCdfzqA",
- # "_score" : 1.0,
- # "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
- # "stream":"stderr","tag":"docker.container.all",
- # "@timestamp":"2014-09-25T22:45:50+00:00"}
- # },
- # ...
- #
- # The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
- # record & add labels to the log record if properly configured. This enables users
- # to filter & search logs on any metadata.
- # For example a Docker container's logs might be in the directory:
- #
- # /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
- #
- # and in the file:
- #
- # 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
- #
- # where 997599971ee6... is the Docker ID of the running container.
- # The Kubernetes kubelet makes a symbolic link to this file on the host machine
- # in the /var/log/containers directory which includes the pod name and the Kubernetes
- # container name:
- #
- # synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
- # ->
- # /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
- #
- # The /var/log directory on the host is mapped to the /var/log directory in the container
- # running this instance of Fluentd and we end up collecting the file:
- #
- # /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
- #
- # This results in the tag:
- #
- # var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
- #
- # The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
- # which are added to the log message as a kubernetes field object & the Docker container ID
- # is also added under the docker field object.
- # The final tag is:
- #
- # kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
- #
- # And the final log record look like:
- #
- # {
- # "log":"2014/09/25 21:15:03 Got request with path wombat\n",
- # "stream":"stderr",
- # "time":"2014-09-25T21:15:03.499185026Z",
- # "kubernetes": {
- # "namespace": "default",
- # "pod_name": "synthetic-logger-0.25lps-pod",
- # "container_name": "synth-lgr"
- # },
- # "docker": {
- # "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
- # }
- # }
- #
- # This makes it easier for users to search for logs by pod name or by
- # the name of the Kubernetes container regardless of how many times the
- # Kubernetes pod has been restarted (resulting in a several Docker container IDs).
- # Json Log Example:
- # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
- # CRI Log Example:
- # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
-
- @id fluentd-containers.log
- @type tail
- path /var/log/containers/*.log
- pos_file /var/log/es-containers.log.pos
- tag raw.kubernetes.*
- read_from_head true
-
- @type multi_format
-
- format json
- time_key time
- time_format %Y-%m-%dT%H:%M:%S.%NZ
-
-
- format /^(?
-
-
- # Detect exceptions in the log output and forward them as one log entry.
-
- @id raw.kubernetes
- @type detect_exceptions
- remove_tag_prefix raw
- message log
- stream stream
- multiline_flush_interval 5
- max_bytes 500000
- max_lines 1000
-
- # Concatenate multi-line logs
-
- @id filter_concat
- @type concat
- key message
- multiline_end_regexp /\n$/
- separator ""
-
- # Enriches records with Kubernetes metadata
-
- @id filter_kubernetes_metadata
- @type kubernetes_metadata
-
- # Fixes json fields in Elasticsearch
-
- @id filter_parser
- @type parser
- key_name log
- reserve_data true
- remove_key_name_field true
-
- @type multi_format
-
- format json
-
-
- format none
-
-
-
- system.input.conf: |-
- # Example:
- # 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
-
- @id minion
- @type tail
- format /^(?
- # Example:
- # Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
-
- @id startupscript.log
- @type tail
- format syslog
- path /var/log/startupscript.log
- pos_file /var/log/es-startupscript.log.pos
- tag startupscript
-
- # Examples:
- # time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
- # time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
- # TODO(random-liu): Remove this after cri container runtime rolls out.
-
- @id docker.log
- @type tail
- format /^time="(?
- # Example:
- # 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
-
- @id etcd.log
- @type tail
- # Not parsing this, because it doesn't have anything particularly useful to
- # parse out of it (like severities).
- format none
- path /var/log/etcd.log
- pos_file /var/log/es-etcd.log.pos
- tag etcd
-
- # Multi-line parsing is required for all the kube logs because very large log
- # statements, such as those that include entire object bodies, get split into
- # multiple lines by glog.
- # Example:
- # I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
-
- @id kubelet.log
- @type tail
- format multiline
- multiline_flush_interval 5s
- format_firstline /^\w\d{4}/
- format1 /^(?\w)(?