diff --git a/.github/workflows/maven.yml b/.github/workflows/maven.yml index 4187aab5..1249ecfd 100644 --- a/.github/workflows/maven.yml +++ b/.github/workflows/maven.yml @@ -31,3 +31,9 @@ jobs: - name: 🚀 Coveralls Coverage Report Submission working-directory: ./api run: mvn coveralls:report --define repoToken=${{ secrets.COVERALL_REPO_SECRET }} + + - name: Run on failure + if: ${{ failure() }} + uses: mxschmitt/action-tmate@v3.11 + timeout-minutes: 15 + diff --git a/.k8s/01-single-pod-with-ephemeral-volume/README.md b/.k8s/01-single-pod-with-ephemeral-volume/README.md deleted file mode 100644 index 41e226f4..00000000 --- a/.k8s/01-single-pod-with-ephemeral-volume/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# Single pod with sidecar, no persistence due to ephemeral volume - -Please follow the steps from [Prerequisites](../README.md#prerequisites) prior to executing the commands below. - -## Initial actions - -Create a namespace and make it default - -``` -kubectl create namespace example-api -kubectl ns example-api - -``` - -## Deployment -``` -kubectl apply -f api-pod.yaml -kubectl wait pod api-pod --for condition=Ready --timeout=90s -kubectl port-forward api-pod 7880:80 -``` - -## Cleanup - -Delete the namespace created above: - -``` -kubectl delete namespace example-api -``` - -## Disadvantages - -- No persisence, data may be lost! 
-- Not scalable, containers may crash with stopping the whole pod -- Not possible to update and restart the app without relaunching database -- Very unstable way to expose the app and the latter is only to the host -- Configuration parameters are copy-pasted -- Password is hard-coded diff --git a/.k8s/01-single-pod-with-ephemeral-volume/api-pod.yaml b/.k8s/01-single-pod-with-ephemeral-volume/api-pod.yaml deleted file mode 100644 index 27b5e11d..00000000 --- a/.k8s/01-single-pod-with-ephemeral-volume/api-pod.yaml +++ /dev/null @@ -1,61 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: api-pod -spec: - containers: - - name: example-api - env: - - name: DB_USERNAME - value: root - - name: DB_PASSWORD - value: root - - name: DB_URL - value: 127.0.0.1:5432/example-api - image: demo-registry:5000/example-api:0.1.0 - imagePullPolicy: IfNotPresent - command: - - sh - - -c - args: - - "dockerize -wait tcp://127.0.0.1:5432 -timeout 20s java -jar api.jar" - ports: - - name: http - containerPort: 80 - protocol: TCP - resources: - limits: - memory: "300Mi" - cpu: "500m" - - name: example-api-database - env: - - name: POSTGRES_DB - value: example-api - - name: POSTGRES_USER - value: root - - name: POSTGRES_PASSWORD - value: root - - name: TZ - value: America/Sao_Paulo - - name: PGTZ - value: America/Sao_Paulo - image: postgres:13 - imagePullPolicy: "IfNotPresent" - ports: - - containerPort: 5432 - resources: - limits: - memory: "128Mi" - cpu: "500m" - volumeMounts: - - mountPath: /var/lib/postgresql/data - name: postgredb-data - subPath: postgres - volumes: - - name: postgredb-data - # - # it is possible also to create a RAM volume by the following way: - # - # emptyDir: - # medium: "Memory" - emptyDir: {} diff --git a/.k8s/02-pods-with-ephemeral-volume/README.md b/.k8s/02-pods-with-ephemeral-volume/README.md deleted file mode 100644 index b129686f..00000000 --- a/.k8s/02-pods-with-ephemeral-volume/README.md +++ /dev/null @@ -1,45 +0,0 @@ -# Only pods, no persistence due to 
ephemeral volume - -Please follow the steps from [Prerequisites](../README.md#prerequisites) prior to executing the commands below. - -## Initial actions - -Create a namespace and make it default - -``` -kubectl create namespace example-api -kubectl ns example-api - -``` - -## Deployment -``` -kubectl apply -f db-pod.yaml -kubectl wait pod database-pod --for condition=Ready --timeout=90s -DATABASE_POD_IP=$(kubectl get pod database-pod --template '{{.status.podIP}}') -cat api-pod-template.yaml | sed "s//${DATABASE_POD_IP//./-}/" | kubectl apply -f - -kubectl wait pod api-pod --for condition=Ready --timeout=90s -kubectl port-forward api-pod 7880:80 -``` - -## Cleanup - -Delete the namespace created above: - -``` -kubectl delete namespace example-api -``` - -## Advantages - -- Now volume with database data belongs only to database -- Components are separated: we are able to update the api app without restarting database (and losing its data) - -## Disadvantages - -- Still no persistence, data may be lost! 
-- Still not scalable -- If database is restarted, its pod IP changes, so we have to restart also the app, much more complex way to connect the app to DB -- Still bad way of how the app is exposed outside the cluster -- Still much copy-pasting of parameters -- Password is hard-coded diff --git a/.k8s/02-pods-with-ephemeral-volume/api-pod-template.yaml b/.k8s/02-pods-with-ephemeral-volume/api-pod-template.yaml deleted file mode 100644 index a59c9fa5..00000000 --- a/.k8s/02-pods-with-ephemeral-volume/api-pod-template.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: api-pod -spec: - containers: - - name: example-api - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - - name: DB_USERNAME - value: root - - name: DB_PASSWORD - value: root - - name: DB_URL - # should be replaced on pod IP of database pod with dots replaced on dashes - value: .$(POD_NAMESPACE).pod.cluster.local:5432/example-api - image: demo-registry:5000/example-api:0.1.0 - imagePullPolicy: IfNotPresent - command: - - sh - - -c - # should be replaced on pod IP of database pod with dots replaced on dashes - args: - - "dockerize -wait tcp://.$(POD_NAMESPACE).pod.cluster.local:5432 -timeout 20s java -jar api.jar" - ports: - - name: http - containerPort: 80 - protocol: TCP - resources: - limits: - memory: "300Mi" - cpu: "500m" diff --git a/.k8s/02-pods-with-ephemeral-volume/db-pod.yaml b/.k8s/02-pods-with-ephemeral-volume/db-pod.yaml deleted file mode 100644 index 77fca5e4..00000000 --- a/.k8s/02-pods-with-ephemeral-volume/db-pod.yaml +++ /dev/null @@ -1,38 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: database-pod -spec: - containers: - - name: example-api-database - env: - - name: POSTGRES_DB - value: example-api - - name: POSTGRES_USER - value: root - - name: POSTGRES_PASSWORD - value: root - - name: TZ - value: America/Sao_Paulo - - name: PGTZ - value: America/Sao_Paulo - image: postgres:13 - imagePullPolicy: 
"IfNotPresent" - ports: - - containerPort: 5432 - resources: - limits: - memory: "128Mi" - cpu: "500m" - volumeMounts: - - mountPath: /var/lib/postgresql/data - name: postgredb-data - subPath: postgres - volumes: - - name: postgredb-data - # - # it is possible also to create a RAM volume by the following way: - # - # emptyDir: - # medium: "Memory" - emptyDir: {} diff --git a/.k8s/03-services-with-pvc/README.md b/.k8s/03-services-with-pvc/README.md deleted file mode 100644 index 4e5481a6..00000000 --- a/.k8s/03-services-with-pvc/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# Added services and persistence via PVC - -Please follow the steps from [Prerequisites](../README.md#prerequisites) prior to executing the commands below. - -## Initial actions - -Create a namespace and make it default - -``` -kubectl create namespace example-api -kubectl ns example-api - -``` - -## Deployment -``` -kubectl apply -f db-pvc.yaml -kubectl apply -f db-pod.yaml -kubectl apply -f db-service.yaml -kubectl apply -f api-pod.yaml -kubectl apply -f api-service.yaml -``` - -## Cleanup - -Delete the namespace created above: - -``` -kubectl delete namespace example-api -``` - -## Advantages - -- Components are separated -- At last persistence of database data -- Easy and stable way of how the app connects to DB -- Easy and stable way to expose the app outside the cluster, now it is possible to implement a way to reach the app not only from the host - -## Disadvantages - -- Still not scalable -- Still much copy-pasting of parameters -- Password is hard-coded diff --git a/.k8s/03-services-with-pvc/api-pod.yaml b/.k8s/03-services-with-pvc/api-pod.yaml deleted file mode 100644 index e65955c1..00000000 --- a/.k8s/03-services-with-pvc/api-pod.yaml +++ /dev/null @@ -1,38 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: api-pod - labels: - component: api -spec: - containers: - - name: example-api - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: 
metadata.namespace - - name: DB_USERNAME - value: root - - name: DB_PASSWORD - value: root - - name: DB_URL - # database is a short DNS name of a service, accessible from within the same namespace - value: database:5432/example-api - image: demo-registry:5000/example-api:0.1.0 - imagePullPolicy: IfNotPresent - command: - - sh - - -c - # database.$(POD_NAMESPACE).svc.cluster.local is a long DNS name of a service, accessible from all namespaces - args: - - "dockerize -wait tcp://database.$(POD_NAMESPACE).svc.cluster.local:5432 -timeout 20s java -jar api.jar" - ports: - - name: http - containerPort: 80 - protocol: TCP - resources: - limits: - memory: "300Mi" - cpu: "500m" diff --git a/.k8s/03-services-with-pvc/api-service.yaml b/.k8s/03-services-with-pvc/api-service.yaml deleted file mode 100644 index 9a505546..00000000 --- a/.k8s/03-services-with-pvc/api-service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: api - labels: - component: api -spec: - type: LoadBalancer - ports: - - port: 8080 - targetPort: 80 - protocol: TCP - name: http - selector: - component: api diff --git a/.k8s/03-services-with-pvc/db-pod.yaml b/.k8s/03-services-with-pvc/db-pod.yaml deleted file mode 100644 index 9326e343..00000000 --- a/.k8s/03-services-with-pvc/db-pod.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: database-pod - labels: - component: db -spec: - containers: - - name: example-api-database - env: - - name: POSTGRES_DB - value: example-api - - name: POSTGRES_USER - value: root - - name: POSTGRES_PASSWORD - value: root - - name: TZ - value: America/Sao_Paulo - - name: PGTZ - value: America/Sao_Paulo - image: postgres:13 - imagePullPolicy: "IfNotPresent" - ports: - - containerPort: 5432 - resources: - limits: - memory: "128Mi" - cpu: "500m" - volumeMounts: - - mountPath: /var/lib/postgresql/data - name: postgredb-data - subPath: postgres - volumes: - - name: postgredb-data - persistentVolumeClaim: - claimName: 
db-pv-claim diff --git a/.k8s/03-services-with-pvc/db-pvc.yaml b/.k8s/03-services-with-pvc/db-pvc.yaml deleted file mode 100644 index 080d81ea..00000000 --- a/.k8s/03-services-with-pvc/db-pvc.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: db-pv-claim - labels: - component: db -spec: - # there are several access modes: ReadWriteOnce, ReadOnlyMany, ReadWriteMany, ReadWriteOncePod - # a volume with ReadWriteOnce can be mounted as a read-write by a single node - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi diff --git a/.k8s/03-services-with-pvc/db-service.yaml b/.k8s/03-services-with-pvc/db-service.yaml deleted file mode 100644 index 3a15abd3..00000000 --- a/.k8s/03-services-with-pvc/db-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: database - labels: - component: db -spec: - # ClusterIP is a default value, may be omitted - type: ClusterIP - ports: - - port: 5432 - selector: - component: db diff --git a/.k8s/04-replicasets-readiness-liveness/README.md b/.k8s/04-replicasets-readiness-liveness/README.md deleted file mode 100644 index 4e751154..00000000 --- a/.k8s/04-replicasets-readiness-liveness/README.md +++ /dev/null @@ -1,45 +0,0 @@ -# ReplicaSets, Readiness and Liveness probes - -Please follow the steps from [Prerequisites](../README.md#prerequisites) prior to executing the commands below. 
- -## Initial actions - -Create a namespace and make it default - -``` -kubectl create namespace example-api -kubectl ns example-api - -``` - -## Deployment -``` -kubectl apply -f db-pvc.yaml -kubectl apply -f db-replicaset.yaml -kubectl apply -f db-service.yaml -kubectl apply -f api-replicaset.yaml -kubectl apply -f api-service.yaml -``` - -## Cleanup - -Delete the namespace created above: - -``` -kubectl delete namespace example-api -``` - -## Advantages - -- Both components are separated and scalable, possiblity to monitor readiness and liveness of components -- Database data is persisted -- Very stable way of how the app connects to DB -- Very stable and reliable way to expose the app outside the cluster - - -## Disadvantages - -- Still much copy-pasting of parameters -- Password is hard-coded -- Persistent volume is shared between all database pods, thus, impossibility to effectively scale database -- Impossibility to smoothly update to a new version of the app diff --git a/.k8s/04-replicasets-readiness-liveness/api-replicaset.yaml b/.k8s/04-replicasets-readiness-liveness/api-replicaset.yaml deleted file mode 100644 index 7794755c..00000000 --- a/.k8s/04-replicasets-readiness-liveness/api-replicaset.yaml +++ /dev/null @@ -1,61 +0,0 @@ -apiVersion: apps/v1 -kind: ReplicaSet -metadata: - name: api-replicaset - labels: - component: api -spec: - replicas: 2 - selector: - matchLabels: - component: api - template: - metadata: - labels: - component: api - spec: - containers: - - name: example-api - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - - name: DB_USERNAME - value: root - - name: DB_PASSWORD - value: root - - name: DB_URL - # database is a short DNS name of a service, accessible from within the same namespace - value: database:5432/example-api - image: demo-registry:5000/example-api:0.1.0 - imagePullPolicy: IfNotPresent - command: - - sh - - -c - # database.$(POD_NAMESPACE).svc.cluster.local is a long 
DNS name of a service, accessible from all namespaces - args: - - "dockerize -wait tcp://database.$(POD_NAMESPACE).svc.cluster.local:5432 -timeout 20s java -jar api.jar" - ports: - - name: http - containerPort: 80 - protocol: TCP - livenessProbe: - initialDelaySeconds: 120 - periodSeconds: 120 - timeoutSeconds: 15 - httpGet: - path: / - port: 80 - readinessProbe: - initialDelaySeconds: 100 - periodSeconds: 15 - timeoutSeconds: 15 - httpGet: - path: / - port: 80 - resources: - limits: - memory: "300Mi" - cpu: "500m" diff --git a/.k8s/04-replicasets-readiness-liveness/api-service.yaml b/.k8s/04-replicasets-readiness-liveness/api-service.yaml deleted file mode 100644 index 9a505546..00000000 --- a/.k8s/04-replicasets-readiness-liveness/api-service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: api - labels: - component: api -spec: - type: LoadBalancer - ports: - - port: 8080 - targetPort: 80 - protocol: TCP - name: http - selector: - component: api diff --git a/.k8s/04-replicasets-readiness-liveness/db-pvc.yaml b/.k8s/04-replicasets-readiness-liveness/db-pvc.yaml deleted file mode 100644 index 080d81ea..00000000 --- a/.k8s/04-replicasets-readiness-liveness/db-pvc.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: db-pv-claim - labels: - component: db -spec: - # there are several access modes: ReadWriteOnce, ReadOnlyMany, ReadWriteMany, ReadWriteOncePod - # a volume with ReadWriteOnce can be mounted as a read-write by a single node - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi diff --git a/.k8s/04-replicasets-readiness-liveness/db-replicaset.yaml b/.k8s/04-replicasets-readiness-liveness/db-replicaset.yaml deleted file mode 100644 index 4f39d951..00000000 --- a/.k8s/04-replicasets-readiness-liveness/db-replicaset.yaml +++ /dev/null @@ -1,55 +0,0 @@ -apiVersion: apps/v1 -kind: ReplicaSet -metadata: - name: database-replicaset - labels: - component: db -spec: 
- replicas: 1 - selector: - matchLabels: - component: db - template: - metadata: - labels: - component: db - spec: - containers: - - name: example-api-database - env: - - name: POSTGRES_DB - value: example-api - - name: POSTGRES_USER - value: root - - name: POSTGRES_PASSWORD - value: root - - name: TZ - value: America/Sao_Paulo - - name: PGTZ - value: America/Sao_Paulo - image: postgres:13 - imagePullPolicy: "IfNotPresent" - ports: - - containerPort: 5432 - readinessProbe: - exec: - command: ["sh", "-c", "exec pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"] - initialDelaySeconds: 15 - timeoutSeconds: 2 - livenessProbe: - exec: - command: ["sh", "-c", "exec pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"] - initialDelaySeconds: 45 - timeoutSeconds: 2 - resources: - limits: - memory: "128Mi" - cpu: "500m" - volumeMounts: - - mountPath: /var/lib/postgresql/data - name: postgredb-data - subPath: postgres - volumes: - - name: postgredb-data - persistentVolumeClaim: - claimName: db-pv-claim diff --git a/.k8s/04-replicasets-readiness-liveness/db-service.yaml b/.k8s/04-replicasets-readiness-liveness/db-service.yaml deleted file mode 100644 index 3a15abd3..00000000 --- a/.k8s/04-replicasets-readiness-liveness/db-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: database - labels: - component: db -spec: - # ClusterIP is a default value, may be omitted - type: ClusterIP - ports: - - port: 5432 - selector: - component: db diff --git a/.k8s/05-deployment-statefulset-configmap-secret/README.md b/.k8s/05-deployment-statefulset-configmap-secret/README.md deleted file mode 100644 index a3f9b918..00000000 --- a/.k8s/05-deployment-statefulset-configmap-secret/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# Deployment, StatefulSet, ConfigMap, Secret - -Please follow the steps from [Prerequisites](../README.md#prerequisites) prior to executing the commands below. 
- -## Initial actions - -Create a namespace and make it default - -``` -kubectl create namespace example-api -kubectl ns example-api - -``` - -## Deployment -``` -kubectl apply -f db-configmap.yaml -kubectl apply -f db-secret.yaml -kubectl apply -f db-service.yaml -kubectl apply -f api-service.yaml -kubectl apply -f db-statefulset.yaml -kubectl apply -f api-deployment.yaml -``` - -## Cleanup - -Delete the namespace created above: - -``` -kubectl delete namespace example-api -``` - -## Advantages - -- Both components are scalable, possiblity to monitor readiness and liveness of components -- Database data is persisted -- Very stable way of how the app connects to DB -- Very stable and reliable way to expose the app outside the cluster -- Configuration parameters are not copy-pasted -- Password is in a secret, we have a way to conceal it from some non-admin users (will be discussed later) -- Persistent volumes per each DB pod -- It is possible to roll out a new version of the app smoothly diff --git a/.k8s/05-deployment-statefulset-configmap-secret/api-deployment.yaml b/.k8s/05-deployment-statefulset-configmap-secret/api-deployment.yaml deleted file mode 100644 index 0dec237d..00000000 --- a/.k8s/05-deployment-statefulset-configmap-secret/api-deployment.yaml +++ /dev/null @@ -1,63 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: api - labels: - component: api -spec: - replicas: 3 - selector: - matchLabels: - component: api - template: - metadata: - labels: - component: api - spec: - containers: - - name: example-api - envFrom: - - configMapRef: - name: db-config - env: - - name: DB_USERNAME - valueFrom: - secretKeyRef: - name: db-secret - key: username - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: db-secret - key: password - - name: DB_URL - value: $(DB_HOST):$(DB_PORT)/$(DB_NAME) - image: demo-registry:5000/example-api:0.1.0 - imagePullPolicy: IfNotPresent - command: - - sh - - -c - args: - - "dockerize -wait tcp://$(DB_HOST):$(DB_PORT) 
-timeout 20s java -jar api.jar" - ports: - - name: http - containerPort: 80 - protocol: TCP - livenessProbe: - initialDelaySeconds: 120 - periodSeconds: 120 - timeoutSeconds: 15 - httpGet: - path: / - port: 80 - readinessProbe: - initialDelaySeconds: 100 - periodSeconds: 15 - timeoutSeconds: 15 - httpGet: - path: / - port: 80 - resources: - limits: - memory: "300Mi" - cpu: "500m" diff --git a/.k8s/05-deployment-statefulset-configmap-secret/api-service.yaml b/.k8s/05-deployment-statefulset-configmap-secret/api-service.yaml deleted file mode 100644 index 9a505546..00000000 --- a/.k8s/05-deployment-statefulset-configmap-secret/api-service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: api - labels: - component: api -spec: - type: LoadBalancer - ports: - - port: 8080 - targetPort: 80 - protocol: TCP - name: http - selector: - component: api diff --git a/.k8s/05-deployment-statefulset-configmap-secret/db-configmap.yaml b/.k8s/05-deployment-statefulset-configmap-secret/db-configmap.yaml deleted file mode 100644 index 456e26da..00000000 --- a/.k8s/05-deployment-statefulset-configmap-secret/db-configmap.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: db-config -data: - DB_HOST: database - DB_PORT: "5432" - DB_NAME: example-api - TZ: America/Sao_Paulo \ No newline at end of file diff --git a/.k8s/05-deployment-statefulset-configmap-secret/db-secret.yaml b/.k8s/05-deployment-statefulset-configmap-secret/db-secret.yaml deleted file mode 100644 index c07dde9b..00000000 --- a/.k8s/05-deployment-statefulset-configmap-secret/db-secret.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: db-secret -type: Opaque -# values from stringData are converted by means of base64 to data -stringData: - username: root - password: root diff --git a/.k8s/05-deployment-statefulset-configmap-secret/db-service.yaml b/.k8s/05-deployment-statefulset-configmap-secret/db-service.yaml deleted 
file mode 100644 index 3a15abd3..00000000 --- a/.k8s/05-deployment-statefulset-configmap-secret/db-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: database - labels: - component: db -spec: - # ClusterIP is a default value, may be omitted - type: ClusterIP - ports: - - port: 5432 - selector: - component: db diff --git a/.k8s/05-deployment-statefulset-configmap-secret/db-statefulset.yaml b/.k8s/05-deployment-statefulset-configmap-secret/db-statefulset.yaml deleted file mode 100644 index 3acf8547..00000000 --- a/.k8s/05-deployment-statefulset-configmap-secret/db-statefulset.yaml +++ /dev/null @@ -1,74 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: database - labels: - component: db -spec: - replicas: 1 - selector: - matchLabels: - component: db - serviceName: database - template: - metadata: - labels: - component: db - spec: - containers: - - name: example-api-database - env: - - name: POSTGRES_DB - valueFrom: - configMapKeyRef: - name: db-config - key: DB_NAME - - name: POSTGRES_USER - valueFrom: - secretKeyRef: - name: db-secret - key: username - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: db-secret - key: password - - name: TZ - valueFrom: - configMapKeyRef: - name: db-config - key: TZ - - name: PGTZ - value: "$(TZ)" - image: postgres:13 - imagePullPolicy: "IfNotPresent" - ports: - - containerPort: 5432 - readinessProbe: - exec: - command: ["sh", "-c", "exec pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"] - initialDelaySeconds: 15 - timeoutSeconds: 2 - livenessProbe: - exec: - command: ["sh", "-c", "exec pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"] - initialDelaySeconds: 45 - timeoutSeconds: 2 - resources: - limits: - memory: "128Mi" - cpu: "500m" - volumeMounts: - - mountPath: /var/lib/postgresql/data - name: postgredb-data - subPath: postgres - volumeClaimTemplates: - - metadata: - name: postgredb-data - spec: - # there are several access modes: ReadWriteOnce, 
ReadOnlyMany, ReadWriteMany, ReadWriteOncePod - # a volume with ReadWriteOnce can be mounted as a read-write by a single node - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 1Gi diff --git a/.k8s/06-job-with-minio/README.md b/.k8s/06-job-with-minio/README.md deleted file mode 100644 index ab521e24..00000000 --- a/.k8s/06-job-with-minio/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# Simple job with Minio S3 storage - -Please follow the steps from [Prerequisites](../README.md#prerequisites) prior to executing the commands below. -Besides, this example assumes that the [Example 5](../05-deployment-statefulset-configmap-secret) is already deployed and not cleaned up from the cluster. - -## Initial actions -``` -kubectl apply -f s3-secret.yaml -# install Minio via Helm -helm upgrade --install minio minio \ - --repo https://charts.min.io \ - --set existingSecret=s3-secret \ - --set mode=standalone --set resources.requests.memory=100Mi \ - --set persistence.enabled=true \ - --set persistence.size=1Gi \ - --set 'buckets[0].name=backups,buckets[0].policy=none,buckets[0].purge=false' -``` - -## Launch job and wait for its completion - -``` -kubectl apply -f db-backup-job.yaml -kubectl wait --for=condition=complete job/db-backup -``` - -## Cleanup - -``` -helm delete minio -kubectl delete -f s3-secret.yaml -``` diff --git a/.k8s/06-job-with-minio/db-backup-job.yaml b/.k8s/06-job-with-minio/db-backup-job.yaml deleted file mode 100644 index 29e45901..00000000 --- a/.k8s/06-job-with-minio/db-backup-job.yaml +++ /dev/null @@ -1,61 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: db-backup -spec: - ttlSecondsAfterFinished: 100 - template: - spec: - containers: - - name: db-backup - envFrom: - - configMapRef: - name: db-config - env: - - name: MODE - value: MANUAL - - name: MANUAL_RUN_FOREVER - value: "FALSE" - - name: CONTAINER_ENABLE_SCHEDULING - value: "FALSE" - - name: CONTAINER_ENABLE_MONITORING - value: "FALSE" - - name: BACKUP_LOCATION - value: 
S3 - - name: S3_HOST - value: minio:9000 - - name: S3_PROTOCOL - value: http - - name: S3_REGION - # this is just to meet the requirements - value: us-east-1 - - name: S3_PATH - value: '' - - name: S3_BUCKET - value: backups - - name: S3_KEY_ID - valueFrom: - secretKeyRef: - name: s3-secret - key: rootUser - - name: S3_KEY_SECRET - valueFrom: - secretKeyRef: - name: s3-secret - key: rootPassword - - name: DB_TYPE - value: pgsql - - name: DB_USER - valueFrom: - secretKeyRef: - name: db-secret - key: username - - name: DB_PASS - valueFrom: - secretKeyRef: - name: db-secret - key: password - image: tiredofit/db-backup:3.4.2 - command: - - backup-now - restartPolicy: Never diff --git a/.k8s/06-job-with-minio/s3-secret.yaml b/.k8s/06-job-with-minio/s3-secret.yaml deleted file mode 100644 index 2da371ad..00000000 --- a/.k8s/06-job-with-minio/s3-secret.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: s3-secret -type: Opaque -# values from stringData are converted by means of base64 to data -stringData: - rootUser: minio - rootPassword: minio123! diff --git a/.k8s/07-cronjob-with-aws-s3/README.md b/.k8s/07-cronjob-with-aws-s3/README.md deleted file mode 100644 index 2574418a..00000000 --- a/.k8s/07-cronjob-with-aws-s3/README.md +++ /dev/null @@ -1,142 +0,0 @@ -# CronJob with AWS S3 storage - -Please follow the steps from [Prerequisites](../README.md#prerequisites) prior to executing the commands below. -Besides, this example assumes that the [Example 5](../05-deployment-statefulset-configmap-secret) is already deployed and not cleaned up from the cluster. 
- -There are two scenarios below for initial actions need to be done before applying manifests to Kubernetes: - -- **a)** Using `Localstack` as a substitute for real AWS services (doesn't require using some real AWS account) -- **b)** Using real AWS (as a production-like scenario) - -## **a)** Using Localstack AWS S3 Bucket - -### Initial actions - -Please execute - -``` -kubectl apply -f s3-configmap.yaml -kubectl apply -f s3-secret.yaml -# install Localstack via Helm -helm upgrade --install localstack localstack --repo https://helm.localstack.cloud -f localstack-values.yaml --set service.type=LoadBalancer -``` - -### Deploy CronJob - -``` -kubectl apply -f db-backup-cronjob.yaml -``` - -### Test CronJob - -Either trigger CronJob manually via some tool like `OpenLens` or execute the following: -``` -kubectl create job --from=cronjob/db-backup db-backup-manual-$(openssl rand -hex 3) -``` - -After this either create port forwarding for `localstack` service via some tool like `OpenLens` or execute - -``` -kubectl port-forward svc/localstack 4566:4566 & -disown -``` -and then this please run -``` -export AWS_ACCESS_KEY_ID=local -export AWS_SECRET_ACCESS_KEY=local - -aws --endpoint http://localhost:4566 s3 ls s3://backups --recursive -``` - -You should see all the files for created backups. 
- -### Cleanup - -``` -helm delete localstack -kubectl delete -f db-backup-cronjob.yaml -kubectl delete -f s3-secret.yaml -kubectl delete -f s3-configmap.yaml -``` - -## **b)** Initial actions for real AWS S3 Bucket - -The prerequisites are - -- to create an AWS account (or to use some already existing one) -- to [install](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) `AWS CLI` (on Manjaro it will be already installed after launching the scripts from the [repo](https://github.com/Alliedium/awesome-linux-config)) -- to [configure](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) `AWS CLI` to interact with AWS via so called *programmatic access* - -Then you need to perform the following steps: - -* in AWS S3 in some region create a bucket to store backups, its name may be any that is allowed to be chosen (the only restriction in AWS is that this name should not coincide with some already used by someone in AWS) -* in AWS IAM [create](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html) a user with programmatic access -* [attach](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html) the following inline policy for this user providing full access to the bucket created above (please change `` below by the name of your bucket used for storing backups): - -``` -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:ListBucket" - ], - "Resource": [ - "arn:aws:s3:::" - ] - }, - { - "Effect": "Allow", - "Action": [ - "s3:PutObject", - "s3:GetObject", - "s3:DeleteObject" - ], - "Resource": [ - "arn:aws:s3:::/*" - ] - } - ] -} -``` - -* change values in `s3-secret.yaml` by real values of `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` taken for this user -* change values in `s3-configmap.yaml` by the values according the comments in that file - -After this execute -``` -kubectl apply -f s3-configmap.yaml -kubectl apply -f 
s3-secret.yaml -``` - -### Deploy CronJob - -``` -kubectl apply -f db-backup-cronjob.yaml -``` - - -### Test CronJob - -Either trigger CronJob manually via some tool like `OpenLens` or execute the following: -``` -kubectl create job --from=cronjob/db-backup db-backup-manual-$(openssl rand -hex 3) -``` - -After this execute - -``` -aws s3 ls s3://backups --recursive - -``` - -You should see all the files for created backups (you can also view them via [console](https://s3.console.aws.amazon.com/s3/home)). - -### Cleanup - -``` -kubectl delete -f db-backup-cronjob.yaml -kubectl delete -f s3-secret.yaml -kubectl delete -f s3-configmap.yaml -``` diff --git a/.k8s/07-cronjob-with-aws-s3/db-backup-cronjob.yaml b/.k8s/07-cronjob-with-aws-s3/db-backup-cronjob.yaml deleted file mode 100644 index 8c8e52e8..00000000 --- a/.k8s/07-cronjob-with-aws-s3/db-backup-cronjob.yaml +++ /dev/null @@ -1,58 +0,0 @@ -apiVersion: batch/v1 -kind: CronJob -metadata: - name: db-backup -spec: - schedule: "0 0 * * *" - jobTemplate: - spec: - template: - spec: - containers: - - name: db-backup - envFrom: - - configMapRef: - name: db-config - - configMapRef: - name: s3-config - env: - - name: MODE - value: MANUAL - - name: MANUAL_RUN_FOREVER - value: "FALSE" - - name: CONTAINER_ENABLE_SCHEDULING - value: "FALSE" - - name: CONTAINER_ENABLE_MONITORING - value: "FALSE" - - name: BACKUP_LOCATION - value: S3 - - name: S3_PATH - value: '' - - name: S3_KEY_ID - valueFrom: - secretKeyRef: - name: s3-secret - key: AWS_ACCESS_KEY_ID - - name: S3_KEY_SECRET - valueFrom: - secretKeyRef: - name: s3-secret - key: AWS_SECRET_ACCESS_KEY - - name: DB_TYPE - value: pgsql - - name: DB_USER - valueFrom: - secretKeyRef: - name: db-secret - key: username - - name: DB_PASS - valueFrom: - secretKeyRef: - name: db-secret - key: password - image: tiredofit/db-backup:3.4.2 - command: - - backup-now - restartPolicy: OnFailure - successfulJobsHistoryLimit: 3 - failedJobsHistoryLimit: 1 diff --git 
a/.k8s/07-cronjob-with-aws-s3/localstack-values.yaml b/.k8s/07-cronjob-with-aws-s3/localstack-values.yaml deleted file mode 100644 index eb6ee01a..00000000 --- a/.k8s/07-cronjob-with-aws-s3/localstack-values.yaml +++ /dev/null @@ -1,15 +0,0 @@ -debug: true -extraEnvVars: - - name: SERVICES - value: s3 - - name: AWS_ACCESS_KEY_ID - value: local - - name: AWS_SECRET_ACCESS_KEY - value: local - - name: AWS_DEFAULT_REGION - value: us-east-1 - - name: HOSTNAME_EXTERNAL - value: localstack -enableStartupScripts: true -startupScriptContent: | - awslocal s3 mb s3://backups diff --git a/.k8s/07-cronjob-with-aws-s3/s3-configmap.yaml b/.k8s/07-cronjob-with-aws-s3/s3-configmap.yaml deleted file mode 100644 index 71a5ee86..00000000 --- a/.k8s/07-cronjob-with-aws-s3/s3-configmap.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: s3-config -data: - S3_HOST: localstack:4566 # use s3.amazonaws.com for real AWS S3 - S3_PROTOCOL: http # use https for real AWS S3 - S3_REGION: us-east-1 # adjust region if necessary - S3_BUCKET: backups # put the real name of S3 bucket diff --git a/.k8s/07-cronjob-with-aws-s3/s3-secret.yaml b/.k8s/07-cronjob-with-aws-s3/s3-secret.yaml deleted file mode 100644 index f999315a..00000000 --- a/.k8s/07-cronjob-with-aws-s3/s3-secret.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: s3-secret -type: Opaque -# values from stringData are converted by means of base64 to data -stringData: - AWS_ACCESS_KEY_ID: "local" - AWS_SECRET_ACCESS_KEY: "local" diff --git a/.k8s/08-pgadmin/README.md b/.k8s/08-pgadmin/README.md deleted file mode 100644 index 258d9f5c..00000000 --- a/.k8s/08-pgadmin/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# Installing pgAdmin - -This example is based on the blog [How to Deploy pgAdmin in Kubernetes](https://www.enterprisedb.com/blog/how-deploy-pgadmin-kubernetes). 
- -``` -kubectl create namespace pgadmin --dry-run=client -o yaml | kubectl apply -f - -kubectl -n pgadmin apply -f pgadmin-secret.yaml -kubectl -n pgadmin apply -f pgadmin-configmap.yaml -kubectl -n pgadmin apply -f pgadmin-service.yaml -kubectl -n pgadmin apply -f pgadmin-statefulset.yaml -``` - -To remove the resources it is sufficient to execute -``` -kubectl delete namespace pgadmin -``` diff --git a/.k8s/08-pgadmin/pgadmin-configmap.yaml b/.k8s/08-pgadmin/pgadmin-configmap.yaml deleted file mode 100644 index 87cd22c7..00000000 --- a/.k8s/08-pgadmin/pgadmin-configmap.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: pgadmin-config -data: - servers.json: | - { - "Servers": { - "1": { - "Name": "Example API PostgreSQL DB", - "Group": "Servers", - "Port": 5432, - "Username": "root", - "Host": "database.example-api.svc.cluster.local", - "SSLMode": "prefer", - "MaintenanceDB": "example-api" - } - } - } diff --git a/.k8s/08-pgadmin/pgadmin-secret.yaml b/.k8s/08-pgadmin/pgadmin-secret.yaml deleted file mode 100644 index 9b19b263..00000000 --- a/.k8s/08-pgadmin/pgadmin-secret.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: pgadmin-secret -type: Opaque -# values from stringData are converted by means of base64 to data -stringData: - pgadmin-password: "123" diff --git a/.k8s/08-pgadmin/pgadmin-service.yaml b/.k8s/08-pgadmin/pgadmin-service.yaml deleted file mode 100644 index 27d7bad1..00000000 --- a/.k8s/08-pgadmin/pgadmin-service.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: pgadmin-service -spec: - ports: - - protocol: TCP - port: 80 - targetPort: http - selector: - app: pgadmin - type: NodePort diff --git a/.k8s/08-pgadmin/pgadmin-statefulset.yaml b/.k8s/08-pgadmin/pgadmin-statefulset.yaml deleted file mode 100644 index 83138059..00000000 --- a/.k8s/08-pgadmin/pgadmin-statefulset.yaml +++ /dev/null @@ -1,56 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet 
-metadata: - name: pgadmin -spec: - serviceName: pgadmin-service - podManagementPolicy: Parallel - replicas: 1 - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app: pgadmin - template: - metadata: - labels: - app: pgadmin - spec: - terminationGracePeriodSeconds: 10 - containers: - - name: pgadmin - image: dpage/pgadmin4:6.14 - imagePullPolicy: Always - env: - - name: PGADMIN_DEFAULT_EMAIL - value: pgadmin@pgadmin.com - - name: PGADMIN_DEFAULT_PASSWORD - valueFrom: - secretKeyRef: - name: pgadmin-secret - key: pgadmin-password - ports: - - name: http - containerPort: 80 - protocol: TCP - volumeMounts: - # get file from ConfigMap - - name: pgadmin-config - mountPath: /pgadmin4/servers.json - subPath: servers.json - readOnly: true - - name: pgadmin-data - mountPath: /var/lib/pgadmin - volumes: - # volumes may be generated from ConfigMaps, Secrets, etc. - - name: pgadmin-config - configMap: - name: pgadmin-config - volumeClaimTemplates: - - metadata: - name: pgadmin-data - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 1Gi diff --git a/.k8s/09-metrics-view-via-grafana/README.md b/.k8s/09-metrics-view-via-grafana/README.md deleted file mode 100644 index 20dc1949..00000000 --- a/.k8s/09-metrics-view-via-grafana/README.md +++ /dev/null @@ -1,304 +0,0 @@ -# Installing PostgreSQL with metrics view via Grafana - -## Prerequisites - -Please follow the steps from [Prerequisites](../README.md#prerequisites) prior to executing the commands below. -Besides, - -- Delete the namespace ```example-api``` in the case it exists - -``` -kubectl delete namespace example-api -``` - -- Install [Helm](https://helm.sh/) if it is not installed yet. On Manjaro Linux, run the command - -``` -yay -S helm --noconfirm -``` - -Otherwise, see [installation instructions](https://helm.sh/docs/intro/install/) - - -## Steps - -### 1. 
Install PostgreSQL via Helm Chart with postgres_exporter tool - -[Helm](https://helm.sh/) is a convenient tool, like a package manager, -for deploying applications with complex structure in Kubernetes, -see also [github](https://github.com/helm/helm#helm) - -[PostgreSQL](https://www.postgresql.org/) -is the world's most advanced Open Source Relational Database - -PostgreSQL Server Exporter ([postgres_exporter](https://github.com/prometheus-community/postgres_exporter)) is an exporter of PostgreSQL server metrics for -[Prometheus](https://prometheus.io/docs/introduction/overview/). - -**From CLI** - -``` -helm repo add bitnami https://charts.bitnami.com/bitnami -``` -``` -helm repo update -``` - -[Bitnami](https://bitnami.com/) -is a leading provider of prepackaged open source software that runs natively in various platforms, including the major public clouds, laptops, -and [Kubernetes](https://bitnami.com/stacks/helm) - -``` -helm upgrade --install postgresql bitnami/postgresql \ - --namespace example-api --create-namespace \ - --set auth.database=example-api \ - --set metrics.enabled=true \ - --cleanup-on-fail --wait -``` - -Namespace ```example-api``` should be created, in which there should be created also resources - -- Pod ```postgresql-0``` -- StatefulSet ```postgresql``` -- one ConfigMap -- two Secrets -- three Services -- PersistentVolumeClaim ```data-postgresql-0``` -- Helm Release ```postgresql``` - -### 2. Install the Spring Boot API - -**From CLI** - -Activate the namespace ```example-api``` - -``` -kubectl ns example-api -``` - -Change to the example directory and apply the manifests - -``` -kubectl apply -f db-configmap.yaml -kubectl apply -f api-deployment.yaml -kubectl apply -f api-service.yaml - -``` - -### 3. 
Check the Spring Boot API installation - -**From OpenLens** - -Check that the Pod ```api-``` in the namespace ```example-api``` is running - - -**From browser on Local machine** - -Open the URL ```http://127.0.0.1:7080``` - -The 'Simple Spring Boot API' page should be opened - - -### 4. Check the postgres_exporter installation - -**From OpenLens** - -Network/Services --> Namespace: ```example-api```, Service: ```postgresql-metrics``` - -Forward port, open in browser - -The page 'Postgres exporter' with 'Metrics' link should be displayed - -Open the 'Metrics' link, metrics information in the text form should be displayed - -Find ```pg_up``` in the text, there should be a line ```pg_up 1``` meaning that the last scrape of metrics from PostgreSQL was able to connect to the server - - -### 5. Prometheus should be installed via Lens metrics. - -**From OpenLens** - -Click in the left pane ```k3d-demo``` cluster, then in drop-down list choose 'Settings' - -Settings page will be opened - -In the left pane of the page, choose 'Lens Metrics' - -The tab 'Lens Metrics' will be opened - -Make sure that all the three switches are ON. - -**Remark.** If you are making the steps **not** for the first time, renew the Prometheus instalation: -- switch OFF all the switches -- click 'Uninstall' button -- wait untill the objects are uninstalled -- switch ON all the switches -- click 'Apply' button - -Prometheus can be seen in Workloads/StatefulSets. - - -### 6. 
Configure Prometheus to scrape the metircs from postgres_exporter - -**From CLI** - -Install [yq](https://github.com/mikefarah/yq) package - -``` -yay -S go-yq --noconfirm -``` - -Add postgres_exporter scrape settings to Prometheus configuration map - -- Put to a variable Prometheus configuration map mainfest supplemented by postgres_exporter scrape settings - -``` -PROMETHEUS_YAML=$(kubectl -n lens-metrics get configmap/prometheus-config -o "jsonpath={.data['prometheus\.yaml']}" | yq eval '.scrape_configs += [{"job_name": "postgres-exporter", "kubernetes_sd_configs": [{"role": "service", "namespaces": {"names": ["example-api"]}}], "relabel_configs": [{"source_labels": ["__meta_kubernetes_service_annotation_prometheus_io_scrape"], "action": "keep", "regex": true}]}]' - | sed "s|\"|'|g") -``` - -- Apply the manifest from the variable - -``` -kubectl -n lens-metrics get configmap/prometheus-config -o yaml | yq eval '.data."prometheus.yaml" = "'"${PROMETHEUS_YAML}"'"' - | kubectl apply -f - -``` - -### 7. Check that postgres_exporter scrape settings were added to Prometheus - -**From OpenLens** - -Config/ConfigMaps, click on ```prometheus-config```, see YAML. - -There should be a section at the bottom - -``` - - job_name: postgres-exporter - kubernetes_sd_configs: - - role: service - namespaces: - names: - - example-api - relabel_configs: - - source_labels: - -__meta_kubernetes_service_annotation_prometheus_io_scrape - action: keep - regex: true - ``` - - -### 8. Scale down and up Prometheus StatefulSet - -**From CLI** - -``` -kubectl -n lens-metrics scale --replicas=0 statefulset/prometheus -``` - -``` -kubectl -n lens-metrics scale --replicas=1 statefulset/prometheus -``` - - -### 9. 
Check that Prometheus is scraping the metircs from postgres_exporter - -**From OpenLens** - -Network/Services --> Namespace: ```lens-metrics```, Service: ```prometheus``` - -Forward port, open in browser - -The Prometheus page will be opened - -In the top menu click 'Status' and choose 'Targets' in drop-down list - -The 'Targets' page will be opened - -Click 'Collapse All' button in menu below the 'Targets' title - -See that ```postgres-exporter``` target is in the page - - -### 10. Install Grafana - -[Grafana](https://github.com/grafana/grafana) is an open-source platform for monitoring and observability. Grafana allows you to query, visualize, alert on and understand your metrics no matter where they are stored - -**From OpenLens** - -Create new namespace ```grafana``` - -Install Grafana to the namespace ```grafana``` via Helm chart - - -### 11. Get Grafana admin's password from the secret - -Config/Secrets --> Namespace: ```grafana```, -click on the secret ```grafana--admin``` - -The right pane with secret properties will be opened - -On the right pane, find the field ```GF_SECURITY_ADMIN_PASSWORD``` - -Click at 'Show' button to the right of the field - -Copy field value to the clipboard - - -### 12. Open Grafana in browser - -Network/Services --> Namespace: ```grafana```, Service: ```grafana-``` - -Forward port, open in browser - -Enter login 'admin', paste the password taken from the secret ```grafana--admin``` on the previous step - - -### 13. 
Add Prometheus datasource to Grafana - -**On the Grafana page in browser** - -Hover the mouse pointer on gear wheel sign at the bottom part of the left toolbar - -Click 'Data sources' record - -Configuration page, tab 'Data sources' will be opened - -Click 'Add data source' button - -Time series databases list will be opened - -Choose 'Prometheus' - -The form 'Data sources / Prometheus', tab 'Settings' will be opened - -Enter the URL: ```http://prometheus.lens-metrics.svc.cluster.local``` - -Click 'Save & test' button at the bottom of the form - -Wait until the field above the button displays record 'Data source is working' - - -### 14. Import PostgreSQL Dashboard to Grafana - -Hover the mouse pointer on four squares sign at the top part of the left toolbar - -Click 'Dashboards' record - -Dashboards page, tab 'Browse' will be opened - -At the button 'New' open the drop-down list and choose 'Import' - -Enter the URL ```https://grafana.com/grafana/dashboards/9628-postgresql-database/``` into 'Import via grafana.com' field - -Click 'Load' button - -In the field 'DS_PROMETHEUS' at the bottom, choose the 'Prometheus' data source - -Click 'Import' button - -**You are done**, the 'PostgreSQL Database' dashboard with the metrics will be displayed - -## Cleanup - -``` -kubectl delete namespace example-api -kubectl delete namespace grafana -kubectl delete namespace lens-metrics -``` diff --git a/.k8s/09-metrics-view-via-grafana/api-deployment.yaml b/.k8s/09-metrics-view-via-grafana/api-deployment.yaml deleted file mode 100644 index 93f2561b..00000000 --- a/.k8s/09-metrics-view-via-grafana/api-deployment.yaml +++ /dev/null @@ -1,60 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: api - labels: - component: api -spec: - replicas: 1 - selector: - matchLabels: - component: api - template: - metadata: - labels: - component: api - spec: - containers: - - name: example-api - envFrom: - - configMapRef: - name: db-config - env: - - name: DB_USERNAME - value: postgres 
- - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: postgresql - key: postgres-password - - name: DB_URL - value: $(DB_HOST):$(DB_PORT)/$(DB_NAME) - image: demo-registry:5000/example-api:0.1.0 - imagePullPolicy: IfNotPresent - command: - - sh - - -c - args: - - "dockerize -wait tcp://$(DB_HOST):$(DB_PORT) -timeout 20s java -jar api.jar" - ports: - - name: http - containerPort: 80 - protocol: TCP - livenessProbe: - initialDelaySeconds: 120 - periodSeconds: 120 - timeoutSeconds: 15 - httpGet: - path: / - port: 80 - readinessProbe: - initialDelaySeconds: 100 - periodSeconds: 15 - timeoutSeconds: 15 - httpGet: - path: / - port: 80 - resources: - limits: - memory: "300Mi" - cpu: "500m" diff --git a/.k8s/09-metrics-view-via-grafana/api-service.yaml b/.k8s/09-metrics-view-via-grafana/api-service.yaml deleted file mode 100644 index 9a505546..00000000 --- a/.k8s/09-metrics-view-via-grafana/api-service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: api - labels: - component: api -spec: - type: LoadBalancer - ports: - - port: 8080 - targetPort: 80 - protocol: TCP - name: http - selector: - component: api diff --git a/.k8s/09-metrics-view-via-grafana/db-configmap.yaml b/.k8s/09-metrics-view-via-grafana/db-configmap.yaml deleted file mode 100644 index 190dc01c..00000000 --- a/.k8s/09-metrics-view-via-grafana/db-configmap.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: db-config -data: - DB_HOST: postgresql - DB_PORT: "5432" - DB_NAME: example-api \ No newline at end of file diff --git a/.k8s/10-zalando-postgres-ha-operator/README.md b/.k8s/10-zalando-postgres-ha-operator/README.md deleted file mode 100644 index 23b72a68..00000000 --- a/.k8s/10-zalando-postgres-ha-operator/README.md +++ /dev/null @@ -1,251 +0,0 @@ -# Installing scalable PostgreSQL via Kubernetes operator, implementing Helm chart for Spring Boot API application - -## Prerequisites - -Please follow the steps from 
[Prerequisites](../README.md#prerequisites) prior to executing the commands below. Besides, - -- Delete the namespace ```example-api``` in the case it exists -``` -kubectl delete namespace example-api -``` - -- Add two nodes (agents) to the cluster - -``` -k3d node create demo-agent -c demo --replicas 2 -``` - -**Remark**. -One can create a new cluster with three server nodes for this example instead of adding two agent nodes as describbed above. To do this, one can run the command - -``` -k3d cluster create demo-10 -s 3 --k3s-arg "--no-deploy=traefik@server:*" --registry-create demo-10-registry:0.0.0.0:22345 --port 1080:8080@loadbalancer -``` - -In contrast to the demo cluster created in [Prerequisites](../README.md#prerequisites), where [Kubernetes Control Plane](https://kubernetes.io/docs/concepts/overview/components/) is only on one node, the new cluster will have Kubernetes Control Plane on all nodes. -Please take into account that while using the latter cluster you need to adjust all the instructions given in [Prerequisites](../README.md#prerequisites) as well as those pointed below by replacing ports ```12345``` and ```7080``` by ```22345``` and ```1080```, respectively, -as well as `demo-registry:5000` by `demo-10-registry:5000`. - - -- Get the cluster nodes - -``` -k3d node list -kubectl get nodes -``` - -## Steps - - -### 1. Install ```postgres-operator``` via Helm Chart - -``` -helm repo add postgres-operator-charts https://opensource.zalando.com/postgres-operator/charts/postgres-operator -helm repo update -helm upgrade --install --cleanup-on-fail postgres-operator postgres-operator-charts/postgres-operator --namespace example-api --create-namespace --set configKubernetes.enable_pod_antiaffinity=true --wait -kubectl ns example-api -``` - -The operator and all its resources will be installed to namespace ```example-api```, which will be created if necessary. This namespace is then made active. - - -### 2. 
See new Custom Resource Definitions - -**From CLI** - -``` -kubectl get crds -``` - -**From OpenLens** - -Custom Resources / acid.zalan.do / postgresql - - -### 3. Create new PostgreSQL cluster - -**From CLI** - -``` -kubectl apply -f db-postgresql.acid.zalan.do.yaml -``` - - -### 4. Watch how PostgreSQL cluster is creating - -**From CLI** - -``` -kubectl get statefulsets -kubectl get pods -kubectl get pvc -kubectl get crd postgresqls.acid.zalan.do -o go-template="{{.spec.names.kind}} {{.spec.names.plural}} " -kubectl get postgresql -kubectl describe postgresql acid-pg-demo -``` - -**From OpenLens** - -- Workloads / StatefulSets -- Workloads / Pods -- Storage / Persistent Volume Claims -- Custom Resources / acid.zalan.do / postgresql - -### 5. Look at PostgreSQL cluster manifest via Postgres Operator UI - -**Install ```postgres-operator-ui``` via Helm Chart** - -``` -helm repo add postgres-operator-ui-charts https://opensource.zalando.com/postgres-operator/charts/postgres-operator-ui -helm repo update -helm upgrade --install --cleanup-on-fail postgres-operator-ui postgres-operator-ui-charts/postgres-operator-ui -f postgres-operator-ui_values.yaml --namespace example-api --create-namespace --wait -``` - -**Remark**. -Value of ```envs.targetNamespace``` changed to ```'*'``` in the file ```postgres-operator-ui_values.yaml``` - - -**Open ```postgres-operator-ui``` in browser** - -Network / Services --> namespace: 'example-api', service: 'postgres-operator-ui' - -Forward port, open in browser the web console - -**From browser on local machine** - -Go to Tab 'PostgreSQL clusters' - -Click at 'acid-pg-demo' - -See the manifest in the left pane 'Cluster YAML definition' - -**Remark**. From the tab 'New cluster' one can create new PostgreSQL cluster instead of applying manifest from CLI (Step 3) - -### 6. 
Install Spring Boot API - -``` -helm upgrade --install --cleanup-on-fail springboot-api ./springboot-api --namespace example-api --create-namespace --set service.type=LoadBalancer --wait -``` - -**Remark**. -In fact, we can look at the manifests to be applied beforehand, this is done as follows: -``` -helm template springboot-api ./springboot-api --namespace example-api --create-namespace --set service.type=LoadBalancer --wait --skip-tests -``` -If we remove ```--skip-tests``` from the command above, we will see also resources for tests included into the chart itself (see below on how to launch these tests). - -In fact, an initial prototype for the Helm chart contained in ```./springboot-api``` folder was created by the command -``` -helm create springboot-api -``` - -### 7. Check Spring Boot API installation - -The three pods ```springboot-api-``` in the namespace ```example-api``` should be already running - -**From browser on Local machine** - -Open the URL ```http://127.0.0.1:7080``` - -The 'Simple Spring Boot API' page should be opened - -**By Helm chart itself** - -``` -helm test springboot-api --namespace example-api -``` - -### 8. Release new version of Spring Boot API - - -``` -APP_VERSION=0.1.1 -docker build --file ../../api/docker/Dockerfile.prod -t localhost:12345/example-api:${APP_VERSION} ../../api -docker push localhost:12345/example-api:${APP_VERSION} -helm package --app-version ${APP_VERSION} --version ${APP_VERSION} ./springboot-api -helm upgrade --install --cleanup-on-fail springboot-api ./springboot-api-${APP_VERSION}.tgz --namespace example-api --create-namespace --reuse-values --timeout 15m --wait -``` - -### 9. Rollback to previous release of Spring Boot API - -``` -helm history springboot-api -helm rollback springboot-api -rm ./springboot-api-${APP_VERSION}.tgz -helm history springboot-api -``` - -### 10. 
Install pgadmin4 via Helm chart - -**From CLI** - -``` -helm repo add runix https://helm.runix.net -helm repo update -helm upgrade --install --cleanup-on-fail pgadmin4 runix/pgadmin4 -f pgadmin4_values.yaml --namespace pgadmin4 --create-namespace --wait -``` - -**Remark**. -If you want to look how the chart above looks like, you may pull this chart by the command -``` -helm pull runix/pgadmin4 -``` -and look inside the archive that should appear immediately after. - -**Remark**. -The [default](https://artifacthub.io/packages/helm/runix/pgadmin4#configuration) value for ```env.email``` is ```chart@example.local```. -It has domain ```local```, which is considered by pgadmin as non-safe, so pgdamin doesn't start. -In the file ```pgadmin4_values.yaml``` this value is changed to ```pgadmin@letmein.org``` - -**Check credentials** - -Helm / Releases --> namespace: 'pgadmin4' - -Click on 'pgadmin4' - -On the left pane, in 'Values' field, check that the value of -- ```env.email``` is ```pgadmin@letmein.org``` -- ```env.password``` is ```123``` - -**Remark**. If there are shown the values from previous Postgres operator, -check and uncheck several times 'User-supplied values only'. -It's possibly a bug in OpenLens - - -### 11. Open pgadmin4 web console on local machine - -Network / Services --> namespace: 'pgadmin4', click on 'pgadmin4' - -Forward port, open in browser - -Log in via ```env.email``` and ```env.password``` values checked in the previous step - - -### 12. Connect to server - -**From OpenLens** - -Config / Secrets --> namespace: 'example-api', click on 'postgres.' 
(with the name of the Postgres cluster) - -Get username, password, full DNS name of the service -(```acid-pg-demo.example-api.svc.cluster.local``` in our example) - -The password for ```postgres``` user may be taken also by executing the command -``` -kubectl -n example-api get secret postgres.acid-pg-demo.credentials.postgresql.acid.zalan.do --template='{{.data.password}}' | base64 -d -``` - - -**From pgadmin web console** - -Create a new server connection - -Use username, password, full DNS name obtained above. - - -## Cleanup - -``` -kubectl delete namespace pgadmin4 -kubectl delete namespace example-api -``` diff --git a/.k8s/10-zalando-postgres-ha-operator/db-postgresql.acid.zalan.do.yaml b/.k8s/10-zalando-postgres-ha-operator/db-postgresql.acid.zalan.do.yaml deleted file mode 100644 index 31854488..00000000 --- a/.k8s/10-zalando-postgres-ha-operator/db-postgresql.acid.zalan.do.yaml +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: acid.zalan.do/v1 -kind: postgresql - -metadata: - name: acid-pg-demo - namespace: example-api - labels: - team: acid - -spec: - teamId: acid - postgresql: - version: "14" - numberOfInstances: 3 - enableConnectionPooler: true - volume: - size: 1Gi - users: - testuser: [] - databases: - testdb: testuser - allowedSourceRanges: [] - # IP ranges to access your cluster go here - - resources: - requests: - cpu: 100m - memory: 100Mi - limits: - cpu: 500m - memory: 1000Mi diff --git a/.k8s/10-zalando-postgres-ha-operator/pgadmin4_values.yaml b/.k8s/10-zalando-postgres-ha-operator/pgadmin4_values.yaml deleted file mode 100644 index b6a6bb2c..00000000 --- a/.k8s/10-zalando-postgres-ha-operator/pgadmin4_values.yaml +++ /dev/null @@ -1,3 +0,0 @@ -env: - email: pgadmin@letmein.org - password: "123" diff --git a/.k8s/10-zalando-postgres-ha-operator/postgres-operator-ui_values.yaml b/.k8s/10-zalando-postgres-ha-operator/postgres-operator-ui_values.yaml deleted file mode 100644 index 0b43d4a5..00000000 --- 
a/.k8s/10-zalando-postgres-ha-operator/postgres-operator-ui_values.yaml +++ /dev/null @@ -1,2 +0,0 @@ -envs: - targetNamespace: '*' diff --git a/.k8s/10-zalando-postgres-ha-operator/springboot-api/.helmignore b/.k8s/10-zalando-postgres-ha-operator/springboot-api/.helmignore deleted file mode 100644 index 0e8a0eb3..00000000 --- a/.k8s/10-zalando-postgres-ha-operator/springboot-api/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/.k8s/10-zalando-postgres-ha-operator/springboot-api/Chart.yaml b/.k8s/10-zalando-postgres-ha-operator/springboot-api/Chart.yaml deleted file mode 100644 index e548691d..00000000 --- a/.k8s/10-zalando-postgres-ha-operator/springboot-api/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: springboot-api -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. 
This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: 0.1.0 diff --git a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/NOTES.txt b/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/NOTES.txt deleted file mode 100644 index d67c30f8..00000000 --- a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/NOTES.txt +++ /dev/null @@ -1,22 +0,0 @@ -1. Get the application URL by running these commands: -{{- if .Values.ingress.enabled }} -{{- range $host := .Values.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} - {{- end }} -{{- end }} -{{- else if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "springboot-api.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "springboot-api.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "springboot-api.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.service.port }} -{{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "springboot-api.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") - echo "Visit http://127.0.0.1:8080 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT -{{- end }} diff --git a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/_helpers.tpl b/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/_helpers.tpl deleted file mode 100644 index bec75a9d..00000000 --- a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "springboot-api.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "springboot-api.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "springboot-api.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "springboot-api.labels" -}} -helm.sh/chart: {{ include "springboot-api.chart" . }} -{{ include "springboot-api.selectorLabels" . 
}} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "springboot-api.selectorLabels" -}} -app.kubernetes.io/name: {{ include "springboot-api.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "springboot-api.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "springboot-api.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/db-configmap.yaml b/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/db-configmap.yaml deleted file mode 100644 index 2492a1de..00000000 --- a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/db-configmap.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "springboot-api.fullname" . }}-db-configmap - labels: - {{- include "springboot-api.labels" . | nindent 4 }} -data: - DB_HOST: {{ .Values.database.host }} - DB_PORT: {{ .Values.database.port | quote }} - DB_NAME: {{ .Values.database.name }} diff --git a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/deployment.yaml b/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/deployment.yaml deleted file mode 100644 index 9ffed106..00000000 --- a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/deployment.yaml +++ /dev/null @@ -1,88 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "springboot-api.fullname" . }} - labels: - {{- include "springboot-api.labels" . 
| nindent 4 }} - component: api -spec: - {{- if not .Values.autoscaling.enabled }} - replicas: {{ .Values.replicaCount }} - {{- end }} - selector: - matchLabels: - {{- include "springboot-api.selectorLabels" . | nindent 6 }} - component: api - template: - metadata: - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "springboot-api.selectorLabels" . | nindent 8 }} - component: api - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "springboot-api.serviceAccountName" . }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - envFrom: - - configMapRef: - name: {{ include "springboot-api.fullname" . }}-db-configmap - env: - - name: DB_USERNAME - value: {{ .Values.database.user }} - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Values.database.existingPasswordSecret.name }} - key: {{ .Values.database.existingPasswordSecret.key }} - - name: DB_URL - value: $(DB_HOST):$(DB_PORT)/$(DB_NAME) - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: - - sh - - -c - args: - - "dockerize -wait tcp://$(DB_HOST):$(DB_PORT) -timeout 20s java -jar api.jar" - ports: - - name: http - containerPort: 80 - protocol: TCP - livenessProbe: - initialDelaySeconds: 120 - periodSeconds: 120 - timeoutSeconds: 15 - httpGet: - path: / - port: http - readinessProbe: - initialDelaySeconds: 100 - periodSeconds: 15 - timeoutSeconds: 15 - httpGet: - path: / - port: http - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . 
| nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/hpa.yaml b/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/hpa.yaml deleted file mode 100644 index ec6edc5d..00000000 --- a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/hpa.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "springboot-api.fullname" . }} - labels: - {{- include "springboot-api.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "springboot-api.fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} diff --git a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/ingress.yaml b/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/ingress.yaml deleted file mode 100644 index d2403ffa..00000000 --- a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/ingress.yaml +++ /dev/null @@ -1,61 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $fullName := include "springboot-api.fullname" . 
-}} -{{- $svcPort := .Values.service.port -}} -{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} - {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} - {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} - {{- end }} -{{- end }} -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "springboot-api.labels" . | nindent 4 }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} - ingressClassName: {{ .Values.ingress.className }} - {{- end }} - {{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . 
| quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} - {{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ .path }} - {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} - pathType: {{ .pathType }} - {{- end }} - backend: - {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/service.yaml b/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/service.yaml deleted file mode 100644 index 3984ffc2..00000000 --- a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/service.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "springboot-api.fullname" . }} - labels: - {{- include "springboot-api.labels" . | nindent 4 }} - component: api -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - selector: - {{- include "springboot-api.selectorLabels" . | nindent 4 }} - component: api diff --git a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/serviceaccount.yaml b/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/serviceaccount.yaml deleted file mode 100644 index a0712313..00000000 --- a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "springboot-api.serviceAccountName" . }} - labels: - {{- include "springboot-api.labels" . 
| nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/tests/test-connection.yaml b/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/tests/test-connection.yaml deleted file mode 100644 index 25f3e081..00000000 --- a/.k8s/10-zalando-postgres-ha-operator/springboot-api/templates/tests/test-connection.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "springboot-api.fullname" . }}-test-connection" - labels: - {{- include "springboot-api.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "springboot-api.fullname" . }}:{{ .Values.service.port }}'] - restartPolicy: Never diff --git a/.k8s/10-zalando-postgres-ha-operator/springboot-api/values.yaml b/.k8s/10-zalando-postgres-ha-operator/springboot-api/values.yaml deleted file mode 100644 index 619242d6..00000000 --- a/.k8s/10-zalando-postgres-ha-operator/springboot-api/values.yaml +++ /dev/null @@ -1,92 +0,0 @@ -# Default values for springboot-api. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -replicaCount: 3 - -database: - host: acid-pg-demo - port: 5432 - name: testdb - user: postgres - existingPasswordSecret: - name: postgres.acid-pg-demo.credentials.postgresql.acid.zalan.do - key: password - -image: - registry: demo-registry:5000 - repository: example-api - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. - tag: "" - -imagePullSecrets: [] -nameOverride: "" -fullnameOverride: "" - -serviceAccount: - # Specifies whether a service account should be created - create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. 
- # If not set and create is true, a name is generated using the fullname template - name: "" - -podAnnotations: {} - -podSecurityContext: {} - # fsGroup: 2000 - -securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - -service: - type: ClusterIP - port: 8080 - -ingress: - enabled: false - className: "" - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: chart-example.local - paths: - - path: / - pathType: ImplementationSpecific - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 500m - memory: 300Mi - # requests: - # cpu: 100m - # memory: 128Mi - -autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - # targetMemoryUtilizationPercentage: 80 - -nodeSelector: {} - -tolerations: [] - -affinity: {} diff --git a/.k8s/README.md b/.k8s/README.md deleted file mode 100644 index 2de8e6cd..00000000 --- a/.k8s/README.md +++ /dev/null @@ -1,60 +0,0 @@ -# 1. Prerequisites - -- Create a cluster with a registry - -``` -k3d cluster create demo --k3s-arg "--no-deploy=traefik@server:*" --registry-create demo-registry:0.0.0.0:12345 --port 7080:8080@loadbalancer -``` - -- Build and push a Docker image (it is assumed that current folder is `.k8s`) - - -``` -docker build --file ../api/docker/Dockerfile.prod -t localhost:12345/example-api:0.1.0 ../api -docker push localhost:12345/example-api:0.1.0 -``` - -# 2. 
Deployment variants - -| Example | Details | -|------|-------| -| [Example 1](./01-single-pod-with-ephemeral-volume) | Single pod with sidecar, no persistence due to ephemeral volume | -| [Example 2](./02-pods-with-ephemeral-volume) | Only pods, no persistence due to ephemeral volume | -| [Example 3](./03-services-with-pvc) | Added services and persistence via PVC | -| [Example 4](./04-replicasets-readiness-liveness) | ReplicaSets, Readiness and Liveness probes | -| [Example 5](./05-deployment-statefulset-configmap-secret) | Deployment, StatefulSet, ConfigMap, Secret | - -## Remarks concerning secrets - -For the last example above as well as for some examples below we use manifests for [Kubernetes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/). And secret values in these manifests are hard coded. But this is just for simplicity, i.e. it is not how secrets are to be handled in real production. The reason is that these hard coded values can be seen by any person having access to respective Git repository which contradicts with an idea of secrets being a secret. - -There are a few different ways to deal with the problem. Here are just two of them (without claiming that these two are the only onces that should be used): - -* Using [Sealed Secrets](https://sealed-secrets.netlify.app/). Here we store in Git a special kind of Kubernetes resources named `SealedSecret`. Its manifest contains *encrypted* values that can be decrypted only by the controller running in the target Kubernetes cluster and nobody else (not even the original author). This way only the controller is able to obtain the original Secret from the SealedSecret (within the cluster). [Sealed Secrets](https://github.com/bitnami-labs/sealed-secrets) use [asymmetric cryptography](https://cheapsslsecurity.com/blog/what-is-asymmetric-encryption-understand-with-simple-examples/) to encrypt secrets with a public key while the private key used for decryption is only known to the controller. 
-* Using [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/) along with [Kubernetes](https://docs.aws.amazon.com/eks/latest/userguide/manage-secrets.html). We can [use a CSI driver](https://docs.aws.amazon.com/secretsmanager/latest/userguide/integrating_csi_driver.html), namely its AWS-based implementation called [AWS Secrets and Configuration Provider ](https://github.com/aws/secrets-store-csi-driver-provider-aws) deployed inside [AWS EKS](https://aws.amazon.com/eks/) Kubernetes cluster (thus, this is an example of how this may be done in production in a cloud). Here we store in Git a special kind of Kubernetes resources named [`SecretProviderClass`](https://docs.aws.amazon.com/secretsmanager/latest/userguide/integrating_csi_driver.html#integrating_csi_driver_SecretProviderClass). Its manifest contains information about your secrets and how to display them in the Amazon EKS pod which references these secrets. And, for example, it is possible to [sync]((https://secrets-store-csi-driver.sigs.k8s.io/topics/sync-as-kubernetes-secret.html)) secrets as a Kubernetes Secret. Another option is to [mount](https://docs.aws.amazon.com/secretsmanager/latest/userguide/integrating_csi_driver_tutorial.html) secrets as files on the pod filesystem. - -Both of these approaches allow us to avoid storing original secret values *directly* inside manifests. However there are cases when it is acceptable to store `Secret` manifests (along with the original secret values) directly inside Git repository. This is when manifests are used only for development purposes (some experiments for instance) and are designed for being deployed in a development cluster and not in production. However the main problem with such an approach is that it can lead to having two sources of truth (read two sets of manifests) - one for development and one for production. 
In our opinion such approach should be avoided as much as possible (instead we should use production ways to deal with secretes even for development). With all that being said, when such *insecure* is used all the secret values may be shared by developers and the secret values are not *really* hidden. To make the rest of the manifests as environment agnostic as possible we should use [Kubernetes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) for deploying them to comply with production configurations. Pods just reference these secrets not knowing what their sources are thus making the rest of the manifests the same both for both development and production. - -For more details see [Good practices for Kubernetes Secrets](https://kubernetes.io/docs/concepts/security/secrets-good-practices/) and the blog [How to use AWS Secrets & Configuration Provider with your Kubernetes Secrets Store CSI driver](https://aws.amazon.com/ru/blogs/security/how-to-use-aws-secrets-configuration-provider-with-kubernetes-secrets-store-csi-driver/). - -# 3. Backup jobs configuration variants - -Both examples below assume that the [Example 5](./05-deployment-statefulset-configmap-secret) is already deployed and not cleaned up from the cluster. - -| Example | Details | -|------|-------| -| [Example 6](./06-job-with-minio) | Simple job with Minio S3 storage | -| [Example 7](./07-cronjob-with-aws-s3) | CronJob with AWS S3 storage | - -# 4. Installing useful tools in Kubernetes - -| Example | Details | -|------|-------| -| [Example 8](./08-pgadmin) | Installing pgAdmin | - -# 5. 
Using aready existing Helm charts and operators, implementing new Helm charts - -| Example | Details | -|------|-------| -| [Example 9](./09-metrics-view-via-grafana) | Installing PostgreSQL with metrics view via Grafana | -| [Example 10](./10-zalando-postgres-ha-operator) | Installing scalable PostgreSQL via Kubernetes operator, implementing Helm chart for Spring Boot API application | diff --git a/api/codestyle/checkstyle_checks.xml b/api/codestyle/checkstyle_checks.xml new file mode 100644 index 00000000..807b80c7 --- /dev/null +++ b/api/codestyle/checkstyle_checks.xml @@ -0,0 +1,194 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/api/codestyle/checkstyle_suppressions.xml b/api/codestyle/checkstyle_suppressions.xml new file mode 100644 index 00000000..40f4001a --- /dev/null +++ b/api/codestyle/checkstyle_suppressions.xml @@ -0,0 +1,7 @@ + + + + + \ No newline at end of file diff --git a/api/codestyle/sun_checks.xml b/api/codestyle/sun_checks.xml new file mode 100644 index 00000000..4b0114e7 --- /dev/null +++ b/api/codestyle/sun_checks.xml @@ -0,0 +1,186 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/api/docker/Dockerfile.prod b/api/docker/Dockerfile.prod index b6ee3dc5..fa82cc3e 100755 --- a/api/docker/Dockerfile.prod +++ b/api/docker/Dockerfile.prod @@ -1,4 +1,5 @@ -FROM maven:3.8.5-openjdk-17 as BUILDER +FROM 
powerman/dockerize:0.19.0 AS DOCKERIZE +FROM maven:3.9.0-eclipse-temurin-17-alpine as BUILDER WORKDIR /usr/src/app @@ -7,22 +8,18 @@ COPY ./src ./src RUN --mount=type=cache,target=/root/.m2 mvn package -DskipTests -FROM openjdk:17-alpine +FROM eclipse-temurin:17.0.6_10-jre-alpine WORKDIR /usr/src/app ENV SERVER_PORT=80 ENV DB_SHOW_SQL=false -ENV DOCKERIZE_VERSION v0.6.1 +COPY --from=DOCKERIZE /usr/local/bin/dockerize /usr/local/bin/ COPY --from=BUILDER /usr/src/app/target/*.jar ./api.jar RUN apk add --no-cache openssl -RUN wget https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-alpine-linux-amd64-$DOCKERIZE_VERSION.tar.gz \ - && tar -C /usr/local/bin -xzvf dockerize-alpine-linux-amd64-$DOCKERIZE_VERSION.tar.gz \ - && rm dockerize-alpine-linux-amd64-$DOCKERIZE_VERSION.tar.gz - EXPOSE ${SERVER_PORT} ENTRYPOINT [ "java", "-jar", "api.jar" ] \ No newline at end of file diff --git a/api/pom.xml b/api/pom.xml index 18e0820c..816149a8 100755 --- a/api/pom.xml +++ b/api/pom.xml @@ -18,6 +18,8 @@ 1.6.11 17 + + @@ -314,4 +316,166 @@ + + + tests + + false + + + + + org.jacoco + jacoco-maven-plugin + 0.8.8 + + + + + + static-code-analysis + + true + + + + org.apache.maven.plugins + maven-checkstyle-plugin + 3.2.0 + + + + + + com.github.spotbugs + spotbugs-maven-plugin + 4.7.3.0 + + + + spotbugs + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + 3.2.0 + + + + checkstyle + + + + + + org.apache.maven.plugins + maven-pmd-plugin + 3.20.0 + + 17 + + + + + cpd + pmd + + + + + + + + + + com.github.spotbugs + spotbugs-maven-plugin + 4.7.3.0 + + + com.github.spotbugs + spotbugs + 4.7.3 + + + + true + false + target + false + true + Max + Low + + + + spotbugs + + spotbugs + check + + + + + + org.apache.maven.plugins + maven-pmd-plugin + 3.20.0 + + false + 17 + + + + pmd-break + prepare-package + + check + cpd + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + 3.2.0 + + + verify-style + process-classes + + check + + + + 
verify-cpd + verify + + checkstyle + + + + + false + true + ${codestyle.checks.file} + ${codestyle.suppressions.file} + false + + + + com.puppycrawl.tools + checkstyle + 9.3 + + + + + + + \ No newline at end of file diff --git a/jenkins/Jenkinsfile b/jenkins/Jenkinsfile new file mode 100644 index 00000000..664bd941 --- /dev/null +++ b/jenkins/Jenkinsfile @@ -0,0 +1,108 @@ +pipeline { + agent any + parameters { + choice( + choices: ['sun_checks', 'sun_checks_with_suppressions', 'custom_checks', 'custom_checks_with_suppressions'], + description: 'Select xml file with checkstyle rules', + name: 'CHECKSTYLE_RULES_SET') + choice( + choices: ['maven3.8.4', 'maven3.9.0'], + description: 'Select maven version', + name: 'MAVEN_VERSION') + } + + + stages { + stage('Build') { + options { + timeout(time: 10, unit: 'MINUTES') + } + steps { + echo 'Download dependencies' + withMaven(maven: getMavenVersion(params.MAVEN_VERSION), publisherStrategy: 'EXPLICIT') { + sh "cd api \n\ + mvn -B -ntp compile" + } + } + } + stage('Static code analysis') { + options { + timeout(time: 10, unit: 'MINUTES') + } + steps { + script { + echo 'Static code analysis' + currentCheckstyleRulesStr = getCheckstyleRules(params.CHECKSTYLE_RULES_SET) + withMaven(maven: getMavenVersion(params.MAVEN_VERSION), publisherStrategy: 'EXPLICIT') { + sh "echo ${currentCheckstyleRulesStr} \n\ + cd api\n\ + mvn -B -ntp clean package -P static-code-analysis ${currentCheckstyleRulesStr}\n\ + echo \"static code analysis finished\"" + } + } + + echo "Reading static analysis report" + recordIssues enabledForFailure: true, failOnError: false, tool: checkStyle(pattern: "**/target/checkstyle-result.xml") + recordIssues enabledForFailure: true, failOnError: false, tool: spotBugs(pattern: "**/target/spotbugs*.xml") + recordIssues enabledForFailure: true, failOnError: false, tool: cpd(pattern: "**/target/cpd.xml") + recordIssues enabledForFailure: true, failOnError: false, tool: pmdParser(pattern: "**/target/pmd.xml") + } + } + 
+ stage('Test') { + options { + timeout(time: 10, unit: 'MINUTES') + } + steps { + script { + echo 'Test' + withMaven(maven: getMavenVersion(params.MAVEN_VERSION), publisherStrategy: 'EXPLICIT') { + sh "cd api\n\ + mvn -B -ntp clean test -P tests \n\ + mvn jacoco:report" + } + } + + publishHTML([allowMissing: true, keepAll: true, alwaysLinkToLastBuild: true, reportDir: 'api/target/site/jacoco', reportFiles: 'index.html', reportName: 'Jacoco coverage HTML report']) + } + } + } +} + +def getMavenVersion(String selectedMavenVersion) { + String mvnVersion = '' + switch (selectedMavenVersion) { + case "maven3.8.4": + mvnVersion = 'maven3.8.4' + break + case "maven3.9.0": + mvnVersion = 'maven3.9.0' + break + default: + mvnVersion = 'maven3.8.4' + break + } + return mvnVersion +} + +def getCheckstyleRules(String selectedCheckstyleRules) { + String mvnOptions = '' + switch (selectedCheckstyleRules) { + case "sun_checks": + mvnOptions = '-Dcodestyle.checks.file=./codestyle/sun_checks.xml' + break + case "sun_checks_with_suppressions": + mvnOptions = '-Dcodestyle.checks.file=./codestyle/sun_checks.xml -Dcodestyle.suppressions.file=./codestyle/checkstyle_suppressions.xml' + break + case "custom_checks": + mvnOptions = '-Dcodestyle.checks.file=./codestyle/checkstyle_checks.xml' + break + case "custom_checks_with_suppressions": + mvnOptions = '-Dcodestyle.checks.file=./codestyle/checkstyle_checks.xml -Dcodestyle.suppressions.file=./codestyle/checkstyle_suppressions.xml' + break + default: + mvnOptions = '-Dcodestyle.checks.file=./codestyle/sun_checks.xml' + break + } + return mvnOptions +} \ No newline at end of file diff --git a/jenkins/README.md b/jenkins/README.md new file mode 100644 index 00000000..65428b29 --- /dev/null +++ b/jenkins/README.md @@ -0,0 +1,65 @@ +# Prerequisites. 
+We used ansible playbook from [project](https://github.com/Alliedium/awesome-jenkins/) to install and set up Jenkins and to create multistaged [pipeline](https://www.jenkins.io/doc/book/pipeline/getting-started/). + +# Pipeline description + Our Jenkins script has 4 stages: _Clone_, _Build_, _Static code analysis_ and _Test_. + We can select the maven version that will be used during the build and different sets of rules to perform static code analysis. + + 1. In the first stage the project will be cloned from the GitHub repository. + + 2. In the _Build_ stage maven dependencies are downloaded and the jar file is built. Here we used the _default_ maven profile from our pom.xml file: + `mvn compile` + 3. The static code analysis is performed during the _Static code analysis_ stage by activating the _static-code-analysis_ maven profile: + `mvn -X compile -P static-code-analysis` + 4. In the stage _Test_ the unit tests are run and the test coverage report is provided via maven command: + `mvn test -P tests` + +# Static code analysis + We used the following maven plugins to perform static code analysis: [spotbugs plugin](https://spotbugs.github.io/spotbugs-maven-plugin/), [pmd/cpd](https://pmd.github.io/latest/pmd_userdocs_tools_maven.html), [checkstyle](https://checkstyle.org/) + + Spotbugs plugin is used to find bugs in Java code. It can scan bytecode generated by JDK8. The support of JDK 11 and newer versions is still experimental. + + CPD plugin helps to avoid duplicated code in your project. It is usually used together with PMD plugin, that looks for bad practices. CPD and PMD plugins are concerned with Java and Apex, but also support some other languages. + The rules used by [Spotbugs](https://spotbugs.readthedocs.io/en/stable/filter.html#) and [PMD](https://pmd.github.io/pmd/pmd_userdocs_extending_writing_rules_intro.html) can be customized, however we used the default ones. 
+ Checkstyle plugin performs code analysis according to the convention standards. This combination of plugins is often used to perform static code analysis. + +### Checkstyle plugin + Checkstyle plugin uses by default sun style standards described in the [sun_checks.xml file](https://github.com/checkstyle/checkstyle/blob/master/src/main/resources/sun_checks.xml). +However, the [google style standards](https://github.com/checkstyle/checkstyle/blob/master/src/main/resources/google_checks.xml) or custom rules can be applied as well, by configuring plugin settings. + +To indicate your rule set file location add the following lines into the plugin configuration in pom.xml: `${codestyle.checks.file}` - for the rule set and `${codestyle.suppressions.file}` - for the suppressions + + or set them directly in the commandline: + `mvn -X install -P static-code-analysis -Dcodestyle.checks.file=./codestyle/checkstyle_checks.xml -Dcodestyle.suppressions.file=./codestyle/checkstyle_suppressions.xml` + + In this project we have 2 files with rules: _sun_checks.xml_; _checkstyle_checks.xml_ - all parameters are the same as in the sun rules set, except the maximum line length; 1 suppression file _checkstyle_suppressions.xml_ - here we have suppressed Javadoc checks. + So, Jenkins Job has 4 options/combinations for codestyle checks: +* sun_checks (default) +* sun_checks_with_suppressions +* custom_checks +* custom_checks_with_suppressions + +### Static code analysis reports + Static code analysis reports are published on Jenkins by using [WarningNG plugin](https://plugins.jenkins.io/warnings-ng/). + We disabled build failure in case the number of bugs and/or warnings exceeds predefined values by setting `failOnError: false`. If you prefer to _fail_ your build you can set cut-off numbers of bugs and warnings for a successful build and set `failOnError: true`. 
This parameter, as well as the cut-off numbers, is [set](https://www.jenkins.io/doc/pipeline/steps/warnings-ng/) separately for each static analysis tool supported by the WarningNG plugin. + +## Test coverage report + We used [Jacoco maven plugin](https://www.eclemma.org/jacoco/trunk/doc/maven.html) to do test coverage analysis. + The report is published in Jenkins by means of the [HTML publisher Jenkins plugin](https://plugins.jenkins.io/htmlpublisher/). + +## References: +1. [DevOps_guide](https://www.polestarllp.com/blog/devops-guide-pipeline-challenges-latest-trends) +2. [Git_workflow](https://www.atlassian.com/git/tutorials/comparing-workflows/gitflow-workflow) +3. [GitOps_principles](https://rafay.co/the-kubernetes-current/gitops-principles-and-workflows-every-team-should-know/) +4. [Introduction_to_maven_profiles](https://maven.apache.org/guides/introduction/introduction-to-profiles.html) +5. [Maven_profiles](https://www.baeldung.com/maven-profiles) +6. [Maven_profiles_detailed_explanation](https://medium.com/javarevisited/maven-profiles-detailed-explanation-1b4c8204466a) +7. [Jenkinsfile](https://www.jenkins.io/doc/book/pipeline/jenkinsfile/) +8. [Pipeline syntax](https://www.jenkins.io/doc/book/pipeline/syntax/) +9. [Jenkins plugin - Pipeline: Stage Step](https://plugins.jenkins.io/pipeline-stage-step/) +10. [Jenkins plugin - Pipeline: Stage View](https://plugins.jenkins.io/pipeline-stage-view/) +11. [Comparison of findbugs(spotbugs), pmd and checkstyle plugins](https://www.sw-engineering-candies.com/blog-1/comparison-of-findbugs-pmd-and-checkstyle) +12. [Spotbugs](https://spotbugs.readthedocs.io/en/stable/introduction.html) +13. [Spotbugs maven plugin documentation](https://spotbugs.readthedocs.io/en/stable/maven.html) +14. [Test_coverage_tools](https://www.softwaretestinghelp.com/code-coverage-tools/) +15. [Jacoco_maven_plugin](https://www.baeldung.com/jacoco)