diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..312b5d2 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,19 @@ +repos: +- repo: https://github.com/adrienverge/yamllint +  rev: v1.35.1 +  hooks: +  - id: yamllint +    name: yamllint yaml +    files: \.(yaml|yml)$ +    args: [ -d, '{extends: default, rules: {line-length: {max: 120}}}' ] + +- repo: https://github.com/pre-commit/pre-commit-hooks +  rev: v4.6.0 +  hooks: +  - id: trailing-whitespace +    files: \.(yaml|yml)$ +  - id: end-of-file-fixer +    files: \.(yaml|yml)$ +  - id: check-yaml +    args: [ --multi, --unsafe ] +  - id: check-added-large-files diff --git a/argo/apps/100-traefik/traefik.yaml b/argo/apps/100-traefik/application.yaml similarity index 82% rename from argo/apps/100-traefik/traefik.yaml rename to argo/apps/100-traefik/application.yaml index f524aa9..f7079ca 100644 --- a/argo/apps/100-traefik/traefik.yaml +++ b/argo/apps/100-traefik/application.yaml @@ -5,14 +5,14 @@ metadata: namespace: argocd # Add finalizer to ensure that Helm release is deleted before the app finalizers: - - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer + - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer spec: project: default source: # Source is the Git repository containing this Application manifest and the wrapper chart repoURL: https://github.com/datamindedbe/playground-data-platform-stack.git path: argo/apps/100-traefik # Path to the wrapper chart directory within the Git repo - targetRevision: HEAD # Or your specific branch/tag + targetRevision: cloudfleet # Or your specific branch/tag # Helm configuration for the wrapper chart helm: @@ -29,4 +29,4 @@ spec: prune: true selfHeal: true syncOptions: - - CreateNamespace=true # Ensure the traefik namespace is created + - CreateNamespace=true # Ensure the traefik namespace is created diff --git 
a/argo/apps/100-traefik/values-default.yaml b/argo/apps/100-traefik/values-default.yaml new file mode 100644 index 0000000..c6daa8d --- /dev/null +++ b/argo/apps/100-traefik/values-default.yaml @@ -0,0 +1,1129 @@ +# Default values for Traefik +# This is a YAML-formatted file. +# Declare variables to be passed into templates + +image: # @schema additionalProperties: false + # -- Traefik image host registry + registry: docker.io + # -- Traefik image repository + repository: traefik + # -- defaults to appVersion. It's used for version checking, even prefixed with experimental- or latest-. + # When a digest is required, `versionOverride` can be used to set the version. + tag: # @schema type:[string, null] + # -- Traefik image pull policy + pullPolicy: IfNotPresent + +# -- Add additional label to all resources +commonLabels: {} + +deployment: + # -- Enable deployment + enabled: true + # -- Deployment or DaemonSet + kind: Deployment + # -- Number of pods of the deployment (only applies when kind == Deployment) + replicas: 1 + # -- Number of old history to retain to allow rollback (If not set, default Kubernetes value is set to 10) + revisionHistoryLimit: # @schema type:[integer, null];minimum:0 + # -- Amount of time (in seconds) before Kubernetes will send the SIGKILL signal if Traefik does not shut down + terminationGracePeriodSeconds: 60 + # -- The minimum number of seconds Traefik needs to be up and running before the DaemonSet/Deployment controller considers it available + minReadySeconds: 0 + ## -- Override the liveness/readiness port. This is useful to integrate traefik + ## with an external Load Balancer that performs healthchecks. + ## Default: ports.traefik.port + healthchecksPort: # @schema type:[integer, null];minimum:0 + ## -- Override the liveness/readiness host. Useful for getting ping to respond on non-default entryPoint. + ## Default: ports.traefik.hostIP if set, otherwise Pod IP + healthchecksHost: "" + ## -- Override the liveness/readiness scheme. 
Useful for getting ping to + ## respond on websecure entryPoint. + healthchecksScheme: # @schema enum:[HTTP, HTTPS, null]; type:[string, null]; default: HTTP + ## -- Override the readiness path. + ## Default: /ping + readinessPath: "" + # -- Override the liveness path. + # Default: /ping + livenessPath: "" + # -- Additional deployment annotations (e.g. for jaeger-operator sidecar injection) + annotations: {} + # -- Additional deployment labels (e.g. for filtering deployment by custom labels) + labels: {} + # -- Additional pod annotations (e.g. for mesh injection or prometheus scraping) + # It supports templating. One can set it with values like traefik/name: '{{ template "traefik.name" . }}' + podAnnotations: {} + # -- Additional Pod labels (e.g. for filtering Pod by custom labels) + podLabels: {} + # -- Additional containers (e.g. for metric offloading sidecars) + additionalContainers: [] + # https://docs.datadoghq.com/developers/dogstatsd/unix_socket/?tab=host + # - name: socat-proxy + # image: alpine/socat:1.0.5 + # args: ["-s", "-u", "udp-recv:8125", "unix-sendto:/socket/socket"] + # volumeMounts: + # - name: dsdsocket + # mountPath: /socket + # -- Additional volumes available for use with initContainers and additionalContainers + additionalVolumes: [] + # - name: dsdsocket + # hostPath: + # path: /var/run/statsd-exporter + # -- Additional initContainers (e.g. for setting file permission as shown below) + initContainers: [] + # The "volume-permissions" init container is required if you run into permission issues. + # Related issue: https://github.com/traefik/traefik-helm-chart/issues/396 + # - name: volume-permissions + # image: busybox:latest + # command: ["sh", "-c", "touch /data/acme.json; chmod -v 600 /data/acme.json"] + # volumeMounts: + # - name: data + # mountPath: /data + # -- Use process namespace sharing + shareProcessNamespace: false + # -- Custom pod DNS policy. 
Apply if `hostNetwork: true` + dnsPolicy: "" + # -- Custom pod [DNS config](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#poddnsconfig-v1-core) + dnsConfig: {} + # -- Custom [host aliases](https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/) + hostAliases: [] + # -- Pull secret for fetching traefik container image + imagePullSecrets: [] + # -- Pod lifecycle actions + lifecycle: {} + # preStop: + # exec: + # command: ["/bin/sh", "-c", "sleep 40"] + # postStart: + # httpGet: + # path: /ping + # port: 8080 + # host: localhost + # scheme: HTTP + # -- Set a runtimeClassName on pod + runtimeClassName: "" + +# -- [Pod Disruption Budget](https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/pod-disruption-budget-v1/) +podDisruptionBudget: # @schema additionalProperties: false + enabled: false + maxUnavailable: # @schema type:[string, integer, null];minimum:0 + minAvailable: # @schema type:[string, integer, null];minimum:0 + +# -- Create a default IngressClass for Traefik +ingressClass: # @schema additionalProperties: false + enabled: true + isDefaultClass: true + name: "" + +core: # @schema additionalProperties: false + # -- Can be used to use globally v2 router syntax. Deprecated since v3.4 /!\. + # See https://doc.traefik.io/traefik/v3.0/migration/v2-to-v3/#new-v3-syntax-notable-changes + defaultRuleSyntax: "" + +# Traefik experimental features +experimental: + # -- Defines whether all plugins must be loaded successfully for Traefik to start + abortOnPluginFailure: false + fastProxy: + # -- Enables the FastProxy implementation. + enabled: false + # -- Enable debug mode for the FastProxy implementation. 
+ debug: false + kubernetesGateway: + # -- Enable traefik experimental GatewayClass CRD + enabled: false + # -- Enable traefik experimental plugins + plugins: {} + # demo: + # moduleName: github.com/traefik/plugindemo + # version: v0.2.1 + +gateway: + # -- When providers.kubernetesGateway.enabled, deploy a default gateway + enabled: true + # -- Set a custom name to gateway + name: "" + # -- By default, Gateway is created in the same `Namespace` than Traefik. + namespace: "" + # -- Additional gateway annotations (e.g. for cert-manager.io/issuer) + annotations: {} + # -- [Infrastructure](https://kubernetes.io/blog/2023/11/28/gateway-api-ga/#gateway-infrastructure-labels) + infrastructure: {} + # -- Define listeners + listeners: + web: + # -- Port is the network port. Multiple listeners may use the same port, subject to the Listener compatibility rules. + # The port must match a port declared in ports section. + port: 8000 + # -- Optional hostname. See [Hostname](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.Hostname) + hostname: "" + # Specify expected protocol on this listener. See [ProtocolType](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.ProtocolType) + protocol: HTTP + # -- Routes are restricted to namespace of the gateway [by default](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.FromNamespaces + namespacePolicy: # @schema type:[string, null] + # websecure listener is disabled by default because certificateRefs needs to be added, + # or you may specify TLS protocol with Passthrough mode and add "--providers.kubernetesGateway.experimentalChannel=true" in additionalArguments section. + # websecure: + # # -- Port is the network port. Multiple listeners may use the same port, subject to the Listener compatibility rules. + # # The port must match a port declared in ports section. + # port: 8443 + # # -- Optional hostname. 
See [Hostname](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.Hostname) + # hostname: + # # Specify expected protocol on this listener See [ProtocolType](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.ProtocolType) + # protocol: HTTPS + # # -- Routes are restricted to namespace of the gateway [by default](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.FromNamespaces) + # namespacePolicy: + # # -- Add certificates for TLS or HTTPS protocols. See [GatewayTLSConfig](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1.GatewayTLSConfig) + # certificateRefs: + # # -- TLS behavior for the TLS session initiated by the client. See [TLSModeType](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.TLSModeType). + # mode: + +gatewayClass: # @schema additionalProperties: false + # -- When providers.kubernetesGateway.enabled and gateway.enabled, deploy a default gatewayClass + enabled: true + # -- Set a custom name to GatewayClass + name: "" + # -- Additional gatewayClass labels (e.g. for filtering gateway objects by custom labels) + labels: {} + +# -- Only dashboard & healthcheck IngressRoute are supported. It's recommended to create workloads CR outside of this Chart. +ingressRoute: + dashboard: + # -- Create an IngressRoute for the dashboard + enabled: false + # -- Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class) + annotations: {} + # -- Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels) + labels: {} + # -- The router match rule used for the dashboard ingressRoute + matchRule: PathPrefix(`/dashboard`) || PathPrefix(`/api`) + # -- The internal service used for the dashboard ingressRoute + services: + - name: api@internal + kind: TraefikService + # -- Specify the allowed entrypoints to use for the dashboard ingress route, (e.g. traefik, web, websecure). 
+ # By default, it's using traefik entrypoint, which is not exposed. + # /!\ Do not expose your dashboard without any protection over the internet /!\ + entryPoints: ["traefik"] + # -- Additional ingressRoute middlewares (e.g. for authentication) + middlewares: [] + # -- TLS options (e.g. secret containing certificate) + tls: {} + healthcheck: + # -- Create an IngressRoute for the healthcheck probe + enabled: false + # -- Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class) + annotations: {} + # -- Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels) + labels: {} + # -- The router match rule used for the healthcheck ingressRoute + matchRule: PathPrefix(`/ping`) + # -- The internal service used for the healthcheck ingressRoute + services: + - name: ping@internal + kind: TraefikService + # -- Specify the allowed entrypoints to use for the healthcheck ingress route, (e.g. traefik, web, websecure). + # By default, it's using traefik entrypoint, which is not exposed. + entryPoints: ["traefik"] + # -- Additional ingressRoute middlewares (e.g. for authentication) + middlewares: [] + # -- TLS options (e.g. secret containing certificate) + tls: {} + +updateStrategy: # @schema additionalProperties: false + # -- Customize updateStrategy of Deployment or DaemonSet + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 # @schema type:[integer, string, null] + maxSurge: 1 # @schema type:[integer, string, null] + +readinessProbe: # @schema additionalProperties: false + # -- The number of consecutive failures allowed before considering the probe as failed. + failureThreshold: 1 + # -- The number of seconds to wait before starting the first probe. + initialDelaySeconds: 2 + # -- The number of seconds to wait between consecutive probes. + periodSeconds: 10 + # -- The minimum consecutive successes required to consider the probe successful. 
+ successThreshold: 1 + # -- The number of seconds to wait for a probe response before considering it as failed. + timeoutSeconds: 2 +livenessProbe: # @schema additionalProperties: false + # -- The number of consecutive failures allowed before considering the probe as failed. + failureThreshold: 3 + # -- The number of seconds to wait before starting the first probe. + initialDelaySeconds: 2 + # -- The number of seconds to wait between consecutive probes. + periodSeconds: 10 + # -- The minimum consecutive successes required to consider the probe successful. + successThreshold: 1 + # -- The number of seconds to wait for a probe response before considering it as failed. + timeoutSeconds: 2 + +# -- Define [Startup Probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes) +startupProbe: {} + +providers: # @schema additionalProperties: false + kubernetesCRD: + # -- Load Kubernetes IngressRoute provider + enabled: true + # -- Allows IngressRoute to reference resources in namespace other than theirs + allowCrossNamespace: false + # -- Allows to reference ExternalName services in IngressRoute + allowExternalNameServices: false + # -- Allows to return 503 when there is no endpoints available + allowEmptyServices: true + # -- When the parameter is set, only resources containing an annotation with the same value are processed. Otherwise, resources missing the annotation, having an empty value, or the value traefik are processed. It will also set required annotation on Dashboard and Healthcheck IngressRoute when enabled. + ingressClass: "" + # labelSelector: environment=production,method=traefik + # -- Array of namespaces to watch. If left empty, Traefik watches all namespaces. . When using `rbac.namespaced`, it will watch helm release namespace and namespaces listed in this array. + namespaces: [] + # -- Defines whether to use Native Kubernetes load-balancing mode by default. 
+ nativeLBByDefault: false + + kubernetesIngress: + # -- Load Kubernetes Ingress provider + enabled: true + # -- Allows to reference ExternalName services in Ingress + allowExternalNameServices: false + # -- Allows to return 503 when there is no endpoints available + allowEmptyServices: true + # -- When ingressClass is set, only Ingresses containing an annotation with the same value are processed. Otherwise, Ingresses missing the annotation, having an empty value, or the value traefik are processed. + ingressClass: # @schema type:[string, null] + # labelSelector: environment=production,method=traefik + # -- Array of namespaces to watch. If left empty, Traefik watches all namespaces. . When using `rbac.namespaced`, it will watch helm release namespace and namespaces listed in this array. + namespaces: [] + # IP used for Kubernetes Ingress endpoints + publishedService: + # -- Enable [publishedService](https://doc.traefik.io/traefik/providers/kubernetes-ingress/#publishedservice) + enabled: true + # -- Override path of Kubernetes Service used to copy status from. Format: namespace/servicename. + # Default to Service deployed with this Chart. + pathOverride: "" + # -- Defines whether to use Native Kubernetes load-balancing mode by default. + nativeLBByDefault: false + + kubernetesGateway: + # -- Enable Traefik Gateway provider for Gateway API + enabled: false + # -- Toggles support for the Experimental Channel resources (Gateway API release channels documentation). + # This option currently enables support for TCPRoute and TLSRoute. + experimentalChannel: false + # -- Array of namespaces to watch. If left empty, Traefik watches all namespaces. . When using `rbac.namespaced`, it will watch helm release namespace and namespaces listed in this array. + namespaces: [] + # -- A label selector can be defined to filter on specific GatewayClass objects only. + labelselector: "" + # -- Defines whether to use Native Kubernetes load-balancing mode by default. 
+ nativeLBByDefault: false + statusAddress: + # -- This IP will get copied to the Gateway status.addresses, and currently only supports one IP value (IPv4 or IPv6). + ip: "" + # -- This Hostname will get copied to the Gateway status.addresses. + hostname: "" + # -- The Kubernetes service to copy status addresses from. When using third parties tools like External-DNS, this option can be used to copy the service loadbalancer.status (containing the service's endpoints IPs) to the gateways. Default to Service of this Chart. + service: + enabled: true + name: "" + namespace: "" + + file: + # -- Create a file provider + enabled: false + # -- Allows Traefik to automatically watch for file changes + watch: true + # -- File content (YAML format, go template supported) (see https://doc.traefik.io/traefik/providers/file/) + content: "" + +# -- Add volumes to the traefik pod. The volume name will be passed to tpl. +# This can be used to mount a cert pair or a configmap that holds a config.toml file. +# After the volume has been mounted, add the configs into traefik by using the `additionalArguments` list below, eg: +# `additionalArguments: +# - "--providers.file.filename=/config/dynamic.toml" +# - "--ping" +# - "--ping.entrypoint=web"` +volumes: [] +# - name: public-cert +# mountPath: "/certs" +# type: secret +# - name: '{{ printf "%s-configs" .Release.Name }}' +# mountPath: "/config" +# type: configMap + +# -- Additional volumeMounts to add to the Traefik container +additionalVolumeMounts: [] +# -- For instance when using a logshipper for access logs +# - name: traefik-logs +# mountPath: /var/log/traefik + +logs: + general: + # -- Set [logs format](https://doc.traefik.io/traefik/observability/logs/#format) + format: # @schema enum:["common", "json", null]; type:[string, null]; default: "common" + # By default, the level is set to INFO. + # -- Alternative logging levels are TRACE, DEBUG, INFO, WARN, ERROR, FATAL, and PANIC. 
+ level: "INFO" # @schema enum:[TRACE,DEBUG,INFO,WARN,ERROR,FATAL,PANIC]; default: "INFO" + # -- To write the logs into a log file, use the filePath option. + filePath: "" + # -- When set to true and format is common, it disables the colorized output. + noColor: false + access: + # -- To enable access logs + enabled: false + # -- Set [access log format](https://doc.traefik.io/traefik/observability/access-logs/#format) + format: # @schema enum:["common", "json", null]; type:[string, null]; default: "common" + # filePath: "/var/log/traefik/access.log + # -- Set [bufferingSize](https://doc.traefik.io/traefik/observability/access-logs/#bufferingsize) + bufferingSize: # @schema type:[integer, null] + # -- Set [filtering](https://docs.traefik.io/observability/access-logs/#filtering) + filters: # @schema additionalProperties: false + # -- Set statusCodes, to limit the access logs to requests with a status codes in the specified range + statuscodes: "" + # -- Set retryAttempts, to keep the access logs when at least one retry has happened + retryattempts: false + # -- Set minDuration, to keep access logs when requests take longer than the specified duration + minduration: "" + # -- Enables accessLogs for internal resources. Default: false. + addInternals: false + fields: + general: + # -- Set default mode for fields.names + defaultmode: keep # @schema enum:[keep, drop, redact]; default: keep + # -- Names of the fields to limit. + names: {} + # -- [Limit logged fields or headers](https://doc.traefik.io/traefik/observability/access-logs/#limiting-the-fieldsincluding-headers) + headers: + # -- Set default mode for fields.headers + defaultmode: drop # @schema enum:[keep, drop, redact]; default: drop + names: {} + +metrics: + # -- Enable metrics for internal resources. Default: false + addInternals: false + + ## Prometheus is enabled by default. + ## It can be disabled by setting "prometheus: null" + prometheus: + # -- Entry point used to expose metrics. 
+ entryPoint: metrics + # -- Enable metrics on entry points. Default: true + addEntryPointsLabels: # @schema type:[boolean, null] + # -- Enable metrics on routers. Default: false + addRoutersLabels: # @schema type:[boolean, null] + # -- Enable metrics on services. Default: true + addServicesLabels: # @schema type:[boolean, null] + # -- Buckets for latency metrics. Default="0.1,0.3,1.2,5.0" + buckets: "" + # -- When manualRouting is true, it disables the default internal router in + ## order to allow creating a custom router for prometheus@internal service. + manualRouting: false + # -- Add HTTP header labels to metrics. See EXAMPLES.md or upstream doc for usage. + headerLabels: {} # @schema type:[object, null] + service: + # -- Create a dedicated metrics service to use with ServiceMonitor + enabled: false + labels: {} + annotations: {} + # -- When set to true, it won't check if Prometheus Operator CRDs are deployed + disableAPICheck: # @schema type:[boolean, null] + serviceMonitor: + # -- Enable optional CR for Prometheus Operator. See EXAMPLES.md for more details. + enabled: false + metricRelabelings: [] + relabelings: [] + jobLabel: "" + interval: "" + honorLabels: false + scrapeTimeout: "" + honorTimestamps: false + enableHttp2: false + followRedirects: false + additionalLabels: {} + namespace: "" + namespaceSelector: {} + prometheusRule: + # -- Enable optional CR for Prometheus Operator. See EXAMPLES.md for more details. + enabled: false + additionalLabels: {} + namespace: "" + + # datadog: + # ## Address instructs exporter to send metrics to datadog-agent at this address. + # address: "127.0.0.1:8125" + # ## The interval used by the exporter to push metrics to datadog-agent. Default=10s + # # pushInterval: 30s + # ## The prefix to use for metrics collection. Default="traefik" + # # prefix: traefik + # ## Enable metrics on entry points. Default=true + # # addEntryPointsLabels: false + # ## Enable metrics on routers. 
Default=false + # # addRoutersLabels: true + # ## Enable metrics on services. Default=true + # # addServicesLabels: false + # influxdb2: + # ## Address instructs exporter to send metrics to influxdb v2 at this address. + # address: localhost:8086 + # ## Token with which to connect to InfluxDB v2. + # token: xxx + # ## Organisation where metrics will be stored. + # org: "" + # ## Bucket where metrics will be stored. + # bucket: "" + # ## The interval used by the exporter to push metrics to influxdb. Default=10s + # # pushInterval: 30s + # ## Additional labels (influxdb tags) on all metrics. + # # additionalLabels: + # # env: production + # # foo: bar + # ## Enable metrics on entry points. Default=true + # # addEntryPointsLabels: false + # ## Enable metrics on routers. Default=false + # # addRoutersLabels: true + # ## Enable metrics on services. Default=true + # # addServicesLabels: false + # statsd: + # ## Address instructs exporter to send metrics to statsd at this address. + # address: localhost:8125 + # ## The interval used by the exporter to push metrics to influxdb. Default=10s + # # pushInterval: 30s + # ## The prefix to use for metrics collection. Default="traefik" + # # prefix: traefik + # ## Enable metrics on entry points. Default=true + # # addEntryPointsLabels: false + # ## Enable metrics on routers. Default=false + # # addRoutersLabels: true + # ## Enable metrics on services. Default=true + # # addServicesLabels: false + otlp: + # -- Set to true in order to enable the OpenTelemetry metrics + enabled: false + # -- Enable metrics on entry points. Default: true + addEntryPointsLabels: # @schema type:[boolean, null] + # -- Enable metrics on routers. Default: false + addRoutersLabels: # @schema type:[boolean, null] + # -- Enable metrics on services. Default: true + addServicesLabels: # @schema type:[boolean, null] + # -- Explicit boundaries for Histogram data points. 
Default: [.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10] + explicitBoundaries: [] + # -- Interval at which metrics are sent to the OpenTelemetry Collector. Default: 10s + pushInterval: "" + # -- Service name used in OTLP backend. Default: traefik. + serviceName: # @schema type:[string, null] + http: + # -- Set to true in order to send metrics to the OpenTelemetry Collector using HTTP. + enabled: false + # -- Format: ://:. Default: http://localhost:4318/v1/metrics + endpoint: "" + # -- Additional headers sent with metrics by the reporter to the OpenTelemetry Collector. + headers: {} + ## Defines the TLS configuration used by the reporter to send metrics to the OpenTelemetry Collector. + tls: + # -- The path to the certificate authority, it defaults to the system bundle. + ca: "" + # -- The path to the public certificate. When using this option, setting the key option is required. + cert: "" + # -- The path to the private key. When using this option, setting the cert option is required. + key: "" + # -- When set to true, the TLS connection accepts any certificate presented by the server regardless of the hostnames it covers. + insecureSkipVerify: # @schema type:[boolean, null] + grpc: + # -- Set to true in order to send metrics to the OpenTelemetry Collector using gRPC + enabled: false + # -- Format: ://:. Default: http://localhost:4318/v1/metrics + endpoint: "" + # -- Allows reporter to send metrics to the OpenTelemetry Collector without using a secured protocol. + insecure: false + ## Defines the TLS configuration used by the reporter to send metrics to the OpenTelemetry Collector. + tls: + # -- The path to the certificate authority, it defaults to the system bundle. + ca: "" + # -- The path to the public certificate. When using this option, setting the key option is required. + cert: "" + # -- The path to the private key. When using this option, setting the cert option is required. 
+ key: "" + # -- When set to true, the TLS connection accepts any certificate presented by the server regardless of the hostnames it covers. + insecureSkipVerify: false + +## Tracing +# -- https://doc.traefik.io/traefik/observability/tracing/overview/ +tracing: # @schema additionalProperties: false + # -- Enables tracing for internal resources. Default: false. + addInternals: false + # -- Service name used in selected backend. Default: traefik. + serviceName: # @schema type:[string, null] + # -- Defines additional resource attributes to be sent to the collector. + resourceAttributes: {} + # -- Defines the list of request headers to add as attributes. It applies to client and server kind spans. + capturedRequestHeaders: [] + # -- Defines the list of response headers to add as attributes. It applies to client and server kind spans. + capturedResponseHeaders: [] + # -- By default, all query parameters are redacted. Defines the list of query parameters to not redact. + safeQueryParams: [] + # -- The proportion of requests to trace, specified between 0.0 and 1.0. Default: 1.0. + sampleRate: # @schema type:[number, null]; minimum:0; maximum:1 + otlp: + # -- See https://doc.traefik.io/traefik/v3.0/observability/tracing/opentelemetry/ + enabled: false + http: + # -- Set to true in order to send metrics to the OpenTelemetry Collector using HTTP. + enabled: false + # -- Format: ://:. Default: http://localhost:4318/v1/metrics + endpoint: "" + # -- Additional headers sent with metrics by the reporter to the OpenTelemetry Collector. + headers: {} + ## Defines the TLS configuration used by the reporter to send metrics to the OpenTelemetry Collector. + tls: + # -- The path to the certificate authority, it defaults to the system bundle. + ca: "" + # -- The path to the public certificate. When using this option, setting the key option is required. + cert: "" + # -- The path to the private key. When using this option, setting the cert option is required. 
+ key: "" + # -- When set to true, the TLS connection accepts any certificate presented by the server regardless of the hostnames it covers. + insecureSkipVerify: false + grpc: + # -- Set to true in order to send metrics to the OpenTelemetry Collector using gRPC + enabled: false + # -- Format: ://:. Default: http://localhost:4318/v1/metrics + endpoint: "" + # -- Allows reporter to send metrics to the OpenTelemetry Collector without using a secured protocol. + insecure: false + ## Defines the TLS configuration used by the reporter to send metrics to the OpenTelemetry Collector. + tls: + # -- The path to the certificate authority, it defaults to the system bundle. + ca: "" + # -- The path to the public certificate. When using this option, setting the key option is required. + cert: "" + # -- The path to the private key. When using this option, setting the cert option is required. + key: "" + # -- When set to true, the TLS connection accepts any certificate presented by the server regardless of the hostnames it covers. + insecureSkipVerify: false + +global: + checkNewVersion: true + # -- Please take time to consider whether or not you wish to share anonymous data with us + # See https://doc.traefik.io/traefik/contributing/data-collection/ + sendAnonymousUsage: false + # -- Required for Azure Marketplace integration. 
+ # See https://learn.microsoft.com/en-us/partner-center/marketplace-offers/azure-container-technical-assets-kubernetes?tabs=linux,linux2#update-the-helm-chart + azure: + # -- Enable specific values for Azure Marketplace + enabled: false + images: + proxy: + image: traefik + tag: latest + registry: docker.io/library + hub: + image: traefik-hub + tag: latest + registry: ghcr.io/traefik + +# -- Additional arguments to be passed at Traefik's binary +# See [CLI Reference](https://docs.traefik.io/reference/static-configuration/cli/) +# Use curly braces to pass values: `helm install --set="additionalArguments={--providers.kubernetesingress.ingressclass=traefik-internal,--log.level=DEBUG}"` +additionalArguments: [] +# - "--providers.kubernetesingress.ingressclass=traefik-internal" +# - "--log.level=DEBUG" + +# -- Additional Environment variables to be passed to Traefik's binary +# @default -- See _values.yaml_ +env: [] + +# -- Environment variables to be passed to Traefik's binary from configMaps or secrets +envFrom: [] + +ports: + traefik: + port: 8080 + # -- Use hostPort if set. + hostPort: # @schema type:[integer, null]; minimum:0 + # -- Use hostIP if set. If not set, Kubernetes will default to 0.0.0.0, which + # means it's listening on all your interfaces and all your IPs. You may want + # to set this value if you need traefik to listen on specific interface + # only. + hostIP: # @schema type:[string, null] + + # Defines whether the port is exposed if service.type is LoadBalancer or + # NodePort. + # + # -- You SHOULD NOT expose the traefik port on production deployments. + # If you want to access it from outside your cluster, + # use `kubectl port-forward` or create a secure ingress + expose: + default: false + # -- The exposed port for this service + exposedPort: 8080 + # -- The port protocol (TCP/UDP) + protocol: TCP + web: + ## -- Enable this entrypoint as a default entrypoint. When a service doesn't explicitly set an entrypoint it will only use this entrypoint. 
+ # asDefault: true + port: 8000 + # hostPort: 8000 + # containerPort: 8000 + expose: + default: true + exposedPort: 80 + ## -- Different target traefik port on the cluster, useful for IP type LB + targetPort: # @schema type:[string, integer, null]; minimum:0 + # The port protocol (TCP/UDP) + protocol: TCP + # -- See [upstream documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) + nodePort: # @schema type:[integer, null]; minimum:0 + redirections: + # -- Port Redirections + # Added in 2.2, one can make permanent redirects via entrypoints. + # Same sets of parameters: to, scheme, permanent and priority. + # https://docs.traefik.io/routing/entrypoints/#redirection + entryPoint: {} + forwardedHeaders: + # -- Trust forwarded headers information (X-Forwarded-*). + trustedIPs: [] + insecure: false + proxyProtocol: + # -- Enable the Proxy Protocol header parsing for the entry point + trustedIPs: [] + insecure: false + # -- Set transport settings for the entrypoint; see also + # https://doc.traefik.io/traefik/routing/entrypoints/#transport + transport: + respondingTimeouts: + readTimeout: # @schema type:[string, integer, null] + writeTimeout: # @schema type:[string, integer, null] + idleTimeout: # @schema type:[string, integer, null] + lifeCycle: + requestAcceptGraceTimeout: # @schema type:[string, integer, null] + graceTimeOut: # @schema type:[string, integer, null] + keepAliveMaxRequests: # @schema type:[integer, null]; minimum:0 + keepAliveMaxTime: # @schema type:[string, integer, null] + websecure: + ## -- Enable this entrypoint as a default entrypoint. When a service doesn't explicitly set an entrypoint it will only use this entrypoint. 
+ # asDefault: true + port: 8443 + hostPort: # @schema type:[integer, null]; minimum:0 + containerPort: # @schema type:[integer, null]; minimum:0 + expose: + default: true + exposedPort: 443 + ## -- Different target traefik port on the cluster, useful for IP type LB + targetPort: # @schema type:[string, integer, null]; minimum:0 + ## -- The port protocol (TCP/UDP) + protocol: TCP + # -- See [upstream documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) + nodePort: # @schema type:[integer, null]; minimum:0 + # -- See [upstream documentation](https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol) + appProtocol: # @schema type:[string, null] + # -- See [upstream documentation](https://doc.traefik.io/traefik/routing/entrypoints/#allowacmebypass) + allowACMEByPass: false + http3: + ## -- Enable HTTP/3 on the entrypoint + ## Enabling it will also enable http3 experimental feature + ## https://doc.traefik.io/traefik/routing/entrypoints/#http3 + ## There are known limitations when trying to listen on same ports for + ## TCP & UDP (Http3). There is a workaround in this chart using dual Service. + ## https://github.com/kubernetes/kubernetes/issues/47249#issuecomment-587960741 + enabled: false + advertisedPort: # @schema type:[integer, null]; minimum:0 + forwardedHeaders: + # -- Trust forwarded headers information (X-Forwarded-*). 
+ trustedIPs: [] + insecure: false + proxyProtocol: + # -- Enable the Proxy Protocol header parsing for the entry point + trustedIPs: [] + insecure: false + # -- See [upstream documentation](https://doc.traefik.io/traefik/routing/entrypoints/#transport) + transport: + respondingTimeouts: + readTimeout: # @schema type:[string, integer, null] + writeTimeout: # @schema type:[string, integer, null] + idleTimeout: # @schema type:[string, integer, null] + lifeCycle: + requestAcceptGraceTimeout: # @schema type:[string, integer, null] + graceTimeOut: # @schema type:[string, integer, null] + keepAliveMaxRequests: # @schema type:[integer, null]; minimum:0 + keepAliveMaxTime: # @schema type:[string, integer, null] + # -- See [upstream documentation](https://doc.traefik.io/traefik/routing/entrypoints/#tls) + tls: + enabled: true + options: "" + certResolver: "" + domains: [] + # -- One can apply Middlewares on an entrypoint + # https://doc.traefik.io/traefik/middlewares/overview/ + # https://doc.traefik.io/traefik/routing/entrypoints/#middlewares + # -- /!\ It introduces here a link between your static configuration and your dynamic configuration /!\ + # It follows the provider naming convention: https://doc.traefik.io/traefik/providers/overview/#provider-namespace + # - namespace-name1@kubernetescrd + # - namespace-name2@kubernetescrd + middlewares: [] + metrics: + # -- When using hostNetwork, use another port to avoid conflict with node exporter: + # https://github.com/prometheus/prometheus/wiki/Default-port-allocations + port: 9100 + # -- You may not want to expose the metrics port on production deployments. 
+ # If you want to access it from outside your cluster, + # use `kubectl port-forward` or create a secure ingress + expose: + default: false + # -- The exposed port for this service + exposedPort: 9100 + # -- The port protocol (TCP/UDP) + protocol: TCP + +# -- TLS Options are created as [TLSOption CRDs](https://doc.traefik.io/traefik/https/tls/#tls-options) +# When using `labelSelector`, you'll need to set labels on tlsOption accordingly. +# See EXAMPLE.md for details. +tlsOptions: {} + +# -- TLS Store are created as [TLSStore CRDs](https://doc.traefik.io/traefik/https/tls/#default-certificate). This is useful if you want to set a default certificate. See EXAMPLE.md for details. +tlsStore: {} + +service: + enabled: true + ## -- Single service is using `MixedProtocolLBService` feature gate. + ## -- When set to false, it will create two Service, one for TCP and one for UDP. + single: true + type: LoadBalancer + # -- Additional annotations applied to both TCP and UDP services (e.g. for cloud provider specific config) + annotations: {} + # -- Additional annotations for TCP service only + annotationsTCP: {} + # -- Additional annotations for UDP service only + annotationsUDP: {} + # -- Additional service labels (e.g. for filtering Service by custom labels) + labels: {} + # -- Additional entries here will be added to the service spec. + # -- Cannot contain type, selector or ports entries. + spec: {} + # externalTrafficPolicy: Cluster + # loadBalancerIP: "1.2.3.4" + # clusterIP: "2.3.4.5" + loadBalancerSourceRanges: [] + # - 192.168.0.1/32 + # - 172.16.0.0/16 + ## -- Class of the load balancer implementation + # loadBalancerClass: service.k8s.aws/nlb + externalIPs: [] + # - 1.2.3.4 + ## One of SingleStack, PreferDualStack, or RequireDualStack. + # ipFamilyPolicy: SingleStack + ## List of IP families (e.g. IPv4 and/or IPv6). 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + # ipFamilies: + # - IPv4 + # - IPv6 + ## + additionalServices: {} + ## -- An additional and optional internal Service. + ## Same parameters as external Service + # internal: + # type: ClusterIP + # # labels: {} + # # annotations: {} + # # spec: {} + # # loadBalancerSourceRanges: [] + # # externalIPs: [] + # # ipFamilies: [ "IPv4","IPv6" ] + +autoscaling: + # -- Create HorizontalPodAutoscaler object. + # See EXAMPLES.md for more details. + enabled: false + +persistence: + # -- Enable persistence using Persistent Volume Claims + # ref: http://kubernetes.io/docs/user-guide/persistent-volumes/. + # It can be used to store TLS certificates along with `certificatesResolvers..acme.storage` option + enabled: false + name: data + existingClaim: "" + accessMode: ReadWriteOnce + size: 128Mi + storageClass: "" + volumeName: "" + path: /data + annotations: {} + # -- Only mount a subpath of the Volume into the pod + subPath: "" + +# -- Certificates resolvers configuration. +# Ref: https://doc.traefik.io/traefik/https/acme/#certificate-resolvers +# See EXAMPLES.md for more details. +certificatesResolvers: {} + +# -- If hostNetwork is true, runs traefik in the host network namespace +# To prevent unschedulable pods due to port collisions, if hostNetwork=true +# and replicas>1, a pod anti-affinity is recommended and will be set if the +# affinity is left as default. +hostNetwork: false + +# -- Whether Role Based Access Control objects like roles and rolebindings should be created +rbac: # @schema additionalProperties: false + enabled: true + # When set to true: + # 1. It switches respectively the use of `ClusterRole` and `ClusterRoleBinding` to `Role` and `RoleBinding`. + # 2. It adds `disableIngressClassLookup` on Kubernetes Ingress with Traefik Proxy v3 until v3.1.4 + # 3. 
It adds `disableClusterScopeResources` on Ingress and CRD (Kubernetes) providers with Traefik Proxy v3.1.2+ + # **NOTE**: `IngressClass`, `NodePortLB` and **Gateway** provider cannot be used with namespaced RBAC. + # See [upstream documentation](https://doc.traefik.io/traefik/providers/kubernetes-ingress/#disableclusterscoperesources) for more details. + namespaced: false + # Enable user-facing roles + # https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles + aggregateTo: [] + # List of Kubernetes secrets that are accessible for Traefik. If empty, then access is granted to every secret. + secretResourceNames: [] + +# -- Enable to create a PodSecurityPolicy and assign it to the Service Account via RoleBinding or ClusterRoleBinding +podSecurityPolicy: + enabled: false + +# -- The service account the pods will use to interact with the Kubernetes API +serviceAccount: # @schema additionalProperties: false + # If set, an existing service account is used + # If not set, a service account is created automatically using the fullname template + name: "" + +# -- Additional serviceAccount annotations (e.g. for oidc authentication) +serviceAccountAnnotations: {} + +# -- [Resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for `traefik` container. +resources: {} + +# -- This example pod anti-affinity forces the scheduler to put traefik pods +# -- on nodes where no other traefik pods are scheduled. +# It should be used when hostNetwork: true to prevent port conflicts +affinity: {} +# podAntiAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# - labelSelector: +# matchLabels: +# app.kubernetes.io/name: '{{ template "traefik.name" . }}' +# app.kubernetes.io/instance: '{{ .Release.Name }}-{{ include "traefik.namespace" . }}' +# topologyKey: kubernetes.io/hostname + +# -- nodeSelector is the simplest recommended form of node selection constraint. 
+nodeSelector: {} +# -- Tolerations allow the scheduler to schedule pods with matching taints. +tolerations: [] +# -- You can use topology spread constraints to control +# how Pods are spread across your cluster among failure-domains. +topologySpreadConstraints: [] +# This example topologySpreadConstraints forces the scheduler to put traefik pods +# on nodes where no other traefik pods are scheduled. +# - labelSelector: +# matchLabels: +# app.kubernetes.io/name: '{{ template "traefik.name" . }}' +# maxSkew: 1 +# topologyKey: kubernetes.io/hostname +# whenUnsatisfiable: DoNotSchedule + +# -- [Pod Priority and Preemption](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) +priorityClassName: "" + +# -- [SecurityContext](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1) +# @default -- See _values.yaml_ +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: [ALL] + readOnlyRootFilesystem: true + +# -- [Pod Security Context](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context) +# @default -- See _values.yaml_ +podSecurityContext: + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 + +# +# -- Extra objects to deploy (value evaluated as a template) +# +# In some cases, it can avoid the need for additional, extended or adhoc deployments. +# See #595 for more details and traefik/tests/values/extra.yaml for example. +extraObjects: [] + +# -- This field override the default Release Namespace for Helm. +# It will not affect optional CRDs such as `ServiceMonitor` and `PrometheusRules` +namespaceOverride: "" + +# -- This field override the default app.kubernetes.io/instance label for all Objects. +instanceLabelOverride: "" + +# -- This field override the default version extracted from image.tag +versionOverride: "" + +# Traefik Hub configuration. 
See https://doc.traefik.io/traefik-hub/ +hub: + # -- Name of `Secret` with key 'token' set to a valid license token. + # It enables API Gateway. + token: "" + # -- Disables all external network connections. + offline: false + # -- By default, Traefik Hub provider watches all namespaces. When using `rbac.namespaced`, it will watch helm release namespace and namespaces listed in this array. + namespaces: [] # @schema required:true + apimanagement: + # -- Set to true in order to enable API Management. Requires a valid license token. + enabled: false + admission: + # -- WebHook admission server listen address. Default: "0.0.0.0:9943". + listenAddr: "" + # -- Certificate name of the WebHook admission server. Default: "hub-agent-cert". + secretName: "hub-agent-cert" + # -- Set custom certificate for the WebHook admission server. The certificate should be specified with _tls.crt_ and _tls.key_ in base64 encoding. + customWebhookCertificate: {} + # -- Set it to false if you need to disable Traefik Hub pod restart when mutating webhook certificate is updated. It's done with a label update. + restartOnCertificateChange: true + openApi: + # -- When set to true, it will only accept paths and methods that are explicitly defined in its OpenAPI specification + validateRequestMethodAndPath: false + + experimental: + # -- Set to true in order to enable AI Gateway. Requires a valid license token. + aigateway: false + providers: + consulCatalogEnterprise: + # -- Enable Consul Catalog Enterprise backend with default settings. + enabled: false + # -- Use local agent caching for catalog reads. + cache: false + # -- Enable Consul Connect support. + connectAware: false + # -- Consider every service as Connect capable by default. + connectByDefault: false + # -- Constraints is an expression that Traefik matches against the container's labels + constraints: "" + # -- Default rule. 
+ defaultRule: "Host(`{{ normalize .Name }}`)" + endpoint: + # -- The address of the Consul server + address: "" + # -- Data center to use. If not provided, the default agent data center is used + datacenter: "" + # -- WaitTime limits how long a Watch will block. If not provided, the agent default + endpointWaitTime: 0 + httpauth: + # -- Basic Auth password + password: "" + # -- Basic Auth username + username: "" + # -- The URI scheme for the Consul server + scheme: "" + tls: + # -- TLS CA + ca: "" + # -- TLS cert + cert: "" + # -- TLS insecure skip verify + insecureSkipVerify: false + # -- TLS key + key: "" + # -- Token is used to provide a per-request ACL token which overrides the agent's + token: "" + # -- Expose containers by default. + exposedByDefault: true + # -- Sets the namespaces used to discover services (Consul Enterprise only). + namespaces: "" + # -- Sets the partition used to discover services (Consul Enterprise only). + partition: "" + # -- Prefix for consul service tags. + prefix: "traefik" + # -- Interval for check Consul API. + refreshInterval: 15 + # -- Forces the read to be fully consistent. + requireConsistent: false + # -- Name of the Traefik service in Consul Catalog (needs to be registered via the + serviceName: "traefik" + # -- Use stale consistency for catalog reads. + stale: false + # -- A list of service health statuses to allow taking traffic. + strictChecks: "passing, warning" + # -- Watch Consul API events. + watch: false + microcks: + # -- Enable Microcks provider. + enabled: false + auth: + # -- Microcks API client ID. + clientId: "" + # -- Microcks API client secret. + clientSecret: "" + # -- Microcks API endpoint. + endpoint: "" + # -- Microcks API token. + token: "" + # -- Microcks API endpoint. + endpoint: "" + # -- Polling interval for Microcks API. + pollInterval: 30 + # -- Polling timeout for Microcks API. 
+ pollTimeout: 5 + tls: + # -- TLS CA + ca: "" + # -- TLS cert + cert: "" + # -- TLS insecure skip verify + insecureSkipVerify: false + # -- TLS key + key: "" + redis: + # -- Enable Redis Cluster. Default: true. + cluster: # @schema type:[boolean, null] + # -- Database used to store information. Default: "0". + database: # @schema type:[string, null] + # -- Endpoints of the Redis instances to connect to. Default: "". + endpoints: "" + # -- The username to use when connecting to Redis endpoints. Default: "". + username: "" + # -- The password to use when connecting to Redis endpoints. Default: "". + password: "" + sentinel: + # -- Name of the set of main nodes to use for main selection. Required when using Sentinel. Default: "". + masterset: "" + # -- Username to use for sentinel authentication (can be different from endpoint username). Default: "". + username: "" + # -- Password to use for sentinel authentication (can be different from endpoint password). Default: "". + password: "" + # -- Timeout applied on connection with redis. Default: "0s". + timeout: "" + tls: + # -- Path to the certificate authority used for the secured connection. + ca: "" + # -- Path to the public certificate used for the secure connection. + cert: "" + # -- Path to the private key used for the secure connection. + key: "" + # -- When insecureSkipVerify is set to true, the TLS connection accepts any certificate presented by the server. Default: false. + insecureSkipVerify: false + # Enable export of errors logs to the platform. Default: true. + sendlogs: # @schema type:[boolean, null] + + tracing: + # -- Tracing headers to duplicate. + # To configure the following, tracing.otlp.enabled needs to be set to true. + additionalTraceHeaders: + enabled: false + traceContext: + # -- Name of the header that will contain the parent-id header copy. + parentId: "" + # -- Name of the header that will contain the trace-id copy. 
+ traceId: "" + # -- Name of the header that will contain the traceparent copy. + traceParent: "" + # -- Name of the header that will contain the tracestate copy. + traceState: "" + +# -- Required for OCI Marketplace integration. +# See https://docs.public.content.oci.oraclecloud.com/en-us/iaas/Content/Marketplace/understanding-helm-charts.htm +oci_meta: + # -- Enable specific values for Oracle Cloud Infrastructure + enabled: false + # -- It needs to be an ocir repo + repo: traefik + images: + proxy: + image: traefik + tag: latest + hub: + image: traefik-hub + tag: latest diff --git a/argo/apps/100-traefik/values.yaml b/argo/apps/100-traefik/values.yaml index 2040dcf..18867ca 100644 --- a/argo/apps/100-traefik/values.yaml +++ b/argo/apps/100-traefik/values.yaml @@ -8,9 +8,31 @@ traefik: enabled: true # Use the default entrypoint for the dashboard entryPoints: - - websecure + - websecure # Define the matching rule for the dashboard route matchRule: Host(`traefik.localhost`) + ports: + web: + transport: + respondingTimeouts: + readTimeout: 3600s + writeTimeout: 3600s + idleTimeout: 3600s + keepAliveMaxTime: 3600s + keepAliveMaxRequests: 1000 + websecure: + transport: + respondingTimeouts: + readTimeout: 3600s + writeTimeout: 3600s + idleTimeout: 3600s + keepAliveMaxTime: 3600s + keepAliveMaxRequests: 1000 + logs: + general: + level: DEBUG + access: + enabled: true # Enable Kubernetes providers providers: diff --git a/argo/apps/101-hetzner-csi/Chart.yaml b/argo/apps/101-hetzner-csi/Chart.yaml new file mode 100644 index 0000000..d9e4edc --- /dev/null +++ b/argo/apps/101-hetzner-csi/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v2 +name: hetzner-csi-wrapper +description: A wrapper Helm chart to deploy Hetzner CSI with custom values. 
+version: 0.1.0 # Version of this wrapper chart +appVersion: "v2.15.0" # Corresponds to the hcloud-csi chart version we depend on + +dependencies: +- name: hcloud-csi + version: "v2.15.0" # The version of the hcloud-csi chart to use + repository: https://charts.hetzner.cloud # The repository of the dependency + # We need to map the values from our local values.yaml to the subchart. + # By default, values under a key matching the dependency name are passed. + # So, values for 'hcloud-csi' in our values.yaml will go to the hcloud-csi subchart. diff --git a/argo/apps/101-hetzner-csi/application.yaml b/argo/apps/101-hetzner-csi/application.yaml new file mode 100644 index 0000000..6b05183 --- /dev/null +++ b/argo/apps/101-hetzner-csi/application.yaml @@ -0,0 +1,31 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: hetzner-csi + namespace: argocd + # Add finalizer to ensure that Helm release is deleted before the app + finalizers: + - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer +spec: + project: default + source: + # Source is the Git repository containing this Application manifest and the wrapper chart + repoURL: https://github.com/datamindedbe/playground-data-platform-stack.git + path: argo/apps/101-hetzner-csi # Path to the wrapper chart directory within the Git repo + targetRevision: cloudfleet # Or your specific branch/tag + + # Helm configuration for the wrapper chart + helm: + # releaseName is optional here, defaults based on app name + releaseName: hetzner-csi + # Values file is implicitly values.yaml within the source path + # valueFiles: # Not needed if using default values.yaml + # - values.yaml + destination: + server: https://kubernetes.default.svc + namespace: kube-system # Deploy the Hetzner CSI driver into kube-system + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: [] diff --git a/argo/apps/101-hetzner-csi/templates/hcloud-secret.yaml b/argo/apps/101-hetzner-csi/templates/hcloud-secret.yaml new file 
mode 100644 index 0000000..15bf122 --- /dev/null +++ b/argo/apps/101-hetzner-csi/templates/hcloud-secret.yaml @@ -0,0 +1,18 @@ +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: hcloud-token + namespace: kube-system +spec: + refreshInterval: 300s + secretStoreRef: + name: vault-backend + kind: ClusterSecretStore + target: + name: hcloud + creationPolicy: Owner + data: + - secretKey: token + remoteRef: + key: cloud/hetzner + property: roottoken diff --git a/argo/apps/101-hetzner-csi/values.yaml b/argo/apps/101-hetzner-csi/values.yaml new file mode 100644 index 0000000..2040dcf --- /dev/null +++ b/argo/apps/101-hetzner-csi/values.yaml @@ -0,0 +1,20 @@ +# Values for the traefik-wrapper chart + +# Values passed to the 'traefik' subchart (dependency) +traefik: + # Enable dashboard access (consider security implications for production) + ingressRoute: + dashboard: + enabled: true + # Use the default entrypoint for the dashboard + entryPoints: + - websecure + # Define the matching rule for the dashboard route + matchRule: Host(`traefik.localhost`) + + # Enable Kubernetes providers + providers: + kubernetesCRD: + enabled: true # Enable CRD provider (for IngressRoute, ServersTransport, etc.) + kubernetesIngress: + enabled: true diff --git a/argo/apps/102-cert-manager/Chart.yaml b/argo/apps/102-cert-manager/Chart.yaml new file mode 100644 index 0000000..8df0d40 --- /dev/null +++ b/argo/apps/102-cert-manager/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +name: cert-manager-wrapper +description: A wrapper to deploy cert-manager with custom values. 
+version: 0.1.0 # Version of this wrapper chart +appVersion: "v1.18.0" # Corresponds to the chart version we depend on (check for latest stable) + +dependencies: +- name: cert-manager + version: "1.18.0" # Specify the cert-manager chart version + repository: https://charts.jetstack.io # Official Jetstack repository for cert-manager diff --git a/argo/apps/102-cert-manager/application.yaml b/argo/apps/102-cert-manager/application.yaml new file mode 100644 index 0000000..1e43a7b --- /dev/null +++ b/argo/apps/102-cert-manager/application.yaml @@ -0,0 +1,28 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: cert-manager + namespace: argocd + finalizers: + - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer +spec: + project: default + source: + # Source is the Git repository containing this Application manifest and the wrapper chart + repoURL: https://github.com/datamindedbe/playground-data-platform-stack.git # Ensure this is your repo URL + path: argo/apps/102-cert-manager # Path to the wrapper chart directory within the Git repo + targetRevision: cloudfleet # Or your specific branch/tag + + # Helm configuration for the wrapper chart + helm: + releaseName: cert-manager + # Values file is implicitly values.yaml within the source path + destination: + server: https://kubernetes.default.svc + namespace: operators # Deploy cert-manager into the operators namespace + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true # Ensure the operators namespace is created diff --git a/argo/apps/102-cert-manager/templates/issuer-prd.yaml b/argo/apps/102-cert-manager/templates/issuer-prd.yaml new file mode 100644 index 0000000..2a6b4b2 --- /dev/null +++ b/argo/apps/102-cert-manager/templates/issuer-prd.yaml @@ -0,0 +1,15 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-prod +spec: + acme: + # server: https://acme-staging-v02.api.letsencrypt.org/directory + server: 
https://acme-v02.api.letsencrypt.org/directory + email: jonathan.merlevede@dataminded.com + privateKeySecretRef: + name: letsencrypt-prod + solvers: + - http01: + ingress: + class: traefik diff --git a/argo/apps/102-cert-manager/templates/issuer-staging.yaml b/argo/apps/102-cert-manager/templates/issuer-staging.yaml new file mode 100644 index 0000000..7b97f18 --- /dev/null +++ b/argo/apps/102-cert-manager/templates/issuer-staging.yaml @@ -0,0 +1,14 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-staging +spec: + acme: + server: https://acme-staging-v02.api.letsencrypt.org/directory + email: jonathan.merlevede@dataminded.com + privateKeySecretRef: + name: letsencrypt-staging + solvers: + - http01: + ingress: + class: traefik diff --git a/argo/apps/102-cert-manager/values.yaml b/argo/apps/102-cert-manager/values.yaml new file mode 100644 index 0000000..0847d5b --- /dev/null +++ b/argo/apps/102-cert-manager/values.yaml @@ -0,0 +1,6 @@ +cert-manager: + crds: + enabled: true # Enable CRDs installation + keep: true # Keep CRDs after uninstalling the chart + prometheus: + enabled: false diff --git a/argo/apps/150-argocd-ingress/Chart.yaml b/argo/apps/150-argocd-ingress/Chart.yaml new file mode 100644 index 0000000..b4d7390 --- /dev/null +++ b/argo/apps/150-argocd-ingress/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v2 +name: argocd-ingress +description: A Helm chart for ArgoCD ingress configuration +type: application +version: 0.1.0 +appVersion: "1.0.0" +maintainers: +- name: Dataminded + email: jonathan.merlevede@dataminded.com diff --git a/argo/apps/150-argocd-ingress/application.yaml b/argo/apps/150-argocd-ingress/application.yaml new file mode 100644 index 0000000..2c474e0 --- /dev/null +++ b/argo/apps/150-argocd-ingress/application.yaml @@ -0,0 +1,22 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: argocd-ingress + namespace: argocd + finalizers: + - argocd.argoproj.io/resources-finalizer +spec: + project: 
default + source: + repoURL: https://github.com/datamindedbe/playground-data-platform-stack.git + path: argo/apps/150-argocd-ingress + targetRevision: cloudfleet + helm: + releaseName: argocd-ingress + destination: + server: https://kubernetes.default.svc + namespace: argocd + syncPolicy: + automated: + prune: true + selfHeal: true diff --git a/argo/apps/150-argocd-ingress/templates/certificate.yaml b/argo/apps/150-argocd-ingress/templates/certificate.yaml new file mode 100644 index 0000000..470c4ac --- /dev/null +++ b/argo/apps/150-argocd-ingress/templates/certificate.yaml @@ -0,0 +1,11 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: argocd-cert +spec: + secretName: argocd-tls + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - argocd.cloudfleet.platform.5ha.re diff --git a/argo/apps/150-argocd-ingress/templates/ingress.yaml b/argo/apps/150-argocd-ingress/templates/ingress.yaml new file mode 100644 index 0000000..1bb2bdc --- /dev/null +++ b/argo/apps/150-argocd-ingress/templates/ingress.yaml @@ -0,0 +1,23 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: argocd-server-ingress + namespace: argocd + annotations: + traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" + ingress.kubernetes.io/ssl-redirect: "false" +spec: + rules: + - host: argocd.cloudfleet.platform.5ha.re + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: argocd-server + port: + number: 80 + tls: + - secretName: argocd-tls diff --git a/argo/apps/150-argocd-ingress/values.yaml b/argo/apps/150-argocd-ingress/values.yaml new file mode 100644 index 0000000..e69de29 diff --git a/argo/apps/180-zot/Chart.yaml b/argo/apps/180-zot/Chart.yaml new file mode 100644 index 0000000..411dcbe --- /dev/null +++ b/argo/apps/180-zot/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v2 +name: zot-wrapper +description: A wrapper Helm chart to deploy zot registry with custom 
values and S3 storage. +version: 0.1.0 # Version of this wrapper chart +appVersion: "v2.1.4" # Corresponds to the zot version we deploy + +dependencies: +- name: zot + version: "0.1.72" # The version of the zot chart to use + repository: https://zotregistry.dev/helm-charts # The repository of the dependency + # Values under the 'zot' key in our values.yaml will be passed to the subchart. diff --git a/argo/apps/180-zot/application.yaml b/argo/apps/180-zot/application.yaml new file mode 100644 index 0000000..13fad8a --- /dev/null +++ b/argo/apps/180-zot/application.yaml @@ -0,0 +1,27 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: zot + namespace: argocd + # Add finalizer to ensure that Helm release is deleted before the app + finalizers: + - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer +spec: + project: default + source: + # Source is the Git repository containing this Application manifest and the wrapper chart + repoURL: https://github.com/datamindedbe/playground-data-platform-stack.git + path: argo/apps/180-zot # Path to the zot wrapper chart directory + targetRevision: cloudfleet # Or your specific branch/tag + # Helm configuration for the wrapper chart + helm: + releaseName: zot # Helm release name + destination: + server: https://kubernetes.default.svc + namespace: services # Deploy zot into the services namespace + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true # Ensure the services namespace is created diff --git a/argo/apps/180-zot/templates/certificate.yaml b/argo/apps/180-zot/templates/certificate.yaml new file mode 100644 index 0000000..bb0ecda --- /dev/null +++ b/argo/apps/180-zot/templates/certificate.yaml @@ -0,0 +1,11 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: zot-cert +spec: + secretName: zot-tls + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - zot.cloudfleet.platform.5ha.re diff --git 
a/argo/apps/180-zot/templates/s3-credentials.yaml b/argo/apps/180-zot/templates/s3-credentials.yaml new file mode 100644 index 0000000..61959b3 --- /dev/null +++ b/argo/apps/180-zot/templates/s3-credentials.yaml @@ -0,0 +1,26 @@ +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: zot-s3-credentials +spec: + refreshInterval: 300s # Refresh every 5 minutes + secretStoreRef: + name: vault-backend + kind: ClusterSecretStore + data: + - secretKey: AWS_ACCESS_KEY_ID # Standard AWS SDK environment variable + remoteRef: + key: platform/s3-credentials # Path in Vault + property: ACCESS_KEY_ID # Property to extract from the Vault secret + - secretKey: AWS_SECRET_ACCESS_KEY # Standard AWS SDK environment variable + remoteRef: + key: platform/s3-credentials # Path in Vault + property: SECRET_ACCESS_KEY # Property to extract from the Vault secret + - secretKey: AWS_REGION # Standard AWS SDK environment variable + remoteRef: + key: platform/s3-credentials # Path in Vault + property: REGION # Property to extract from the Vault secret + - secretKey: AWS_ENDPOINT_URL # Standard AWS SDK environment variable for custom endpoints + remoteRef: + key: platform/s3-credentials # Path in Vault + property: ENDPOINT # Property to extract from the Vault secret diff --git a/argo/apps/180-zot/templates/traefik-middleware.yaml b/argo/apps/180-zot/templates/traefik-middleware.yaml new file mode 100644 index 0000000..c73d5c7 --- /dev/null +++ b/argo/apps/180-zot/templates/traefik-middleware.yaml @@ -0,0 +1,22 @@ +# apiVersion: middlewares.traefik.io/v1alpha1 +# kind: Middleware +# metadata: +# name: zot-registry +# spec: +# # Retry on network errors and timeouts +# retry: +# attempts: 3 +# initialInterval: "100ms" +# --- +# apiVersion: middlewares.traefik.io/v1alpha1 +# kind: Middleware +# metadata: +# name: zot-buffering +# spec: +# # Configure buffering for large requests +# buffering: +# maxRequestBodyBytes: 0 # Unlimited request body size +# maxResponseBodyBytes: 0 # 
Unlimited response body size +# memRequestBodyBytes: 2097152 # 2MB memory buffer +# memResponseBodyBytes: 2097152 # 2MB memory buffer +# retryExpression: "IsNetworkError() && Attempts() <= 2" diff --git a/argo/apps/180-zot/templates/zot-config.yaml b/argo/apps/180-zot/templates/zot-config.yaml new file mode 100644 index 0000000..6f2c8c2 --- /dev/null +++ b/argo/apps/180-zot/templates/zot-config.yaml @@ -0,0 +1,96 @@ +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: zot-config +spec: + refreshInterval: 300s # Refresh every 5 minutes + secretStoreRef: + name: vault-backend + kind: ClusterSecretStore + target: + name: zot-config + template: + engineVersion: v2 + #"distSpecVersion": "1.1.1", + data: + config.json: | + { + "storage": { + "rootDirectory": "/var/lib/registry", + "dedupe": false, + "storageDriver": { + "name": "s3", + "rootdirectory": "/platform/zot", + "region": "{{`{{ .region }}`}}", + "regionendpoint": "{{`{{ .endpoint }}`}}", + "bucket": "inno-days-bucket", + "accesskey": "{{`{{ .access_key_id }}`}}", + "secretkey": "{{`{{ .secret_access_key }}`}}", + "secure": true, + "skipverify": false + } + }, + "http": { + "address": "0.0.0.0", + "port": "5000", + "compat": ["docker2s2"] + }, + "log": { + "level": "debug", + "output": "/dev/stdout", + "audit": "/dev/stdout" + }, + "extensions": { + "search": { + "enable": true + }, + "ui": { + "enable": true + }, + "sync": { + "enable": true, + "downloadDir": "/tmp/zot-sync", + "registries": [ + { + "urls": ["https://k8s.gcr.io"], + "onDemand": true, + "tlsVerify": true, + "content": [ + { + "prefix": "**", + "destination": "/k8s" + } + ] + }, + { + "urls": ["https://docker.io/library"], + "onDemand": true, + "tlsVerify": true, + "content": [ + { + "prefix": "**", + "destination": "/dockerhub" + } + ] + } + ] + } + } + } + data: + - secretKey: access_key_id + remoteRef: + key: platform/s3-credentials + property: ACCESS_KEY_ID + - secretKey: secret_access_key + remoteRef: + key: 
platform/s3-credentials + property: SECRET_ACCESS_KEY + - secretKey: region + remoteRef: + key: platform/s3-credentials + property: REGION + - secretKey: endpoint + remoteRef: + key: platform/s3-credentials + property: ENDPOINT diff --git a/argo/apps/180-zot/values-default.yaml b/argo/apps/180-zot/values-default.yaml new file mode 100644 index 0000000..ab88389 --- /dev/null +++ b/argo/apps/180-zot/values-default.yaml @@ -0,0 +1,211 @@ +# Default values for zot. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +replicaCount: 1 +image: + repository: ghcr.io/project-zot/zot-linux-amd64 + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "v2.1.4" +# Defaults to the release namespace if not specified +namespace: "" +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" +service: + type: NodePort + port: 5000 + nodePort: null # Set to a specific port if type is NodePort + # Annotations to add to the service + annotations: {} + # Set to a static IP if a static IP is desired, only works when + # type: ClusterIP + clusterIP: null +# Enabling this will publicly expose your zot server +# Only enable this if you have security enabled on your cluster +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # If using nginx, disable body limits and increase read and write timeouts + # nginx.ingress.kubernetes.io/proxy-body-size: "0" + # nginx.ingress.kubernetes.io/proxy-read-timeout: "600" + # nginx.ingress.kubernetes.io/proxy-send-timeout: "600" + className: "nginx" + pathtype: ImplementationSpecific + hosts: + - host: chart-example.local + paths: + - path: / + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + # By default, Kubernetes HTTP probes use HTTP 'scheme'. So if TLS is enabled + # in configuration, to prevent failures, the scheme must be set to 'HTTPS'. +httpGet: + scheme: HTTP +# By default, Kubernetes considers a Pod healthy if the liveness probe returns +# successfully. However, sometimes applications need additional startup time on +# their first initialization. By defining a startupProbe, we can allow the +# application to take extra time for initialization without compromising fast +# response to deadlocks. 
+startupProbe: + initialDelaySeconds: 5 + periodSeconds: 10 + failureThreshold: 3 +# If mountConfig is true the configMap named $CHART_RELEASE-config is mounted +# on the pod's '/etc/zot' directory +mountConfig: false +# If mountConfig is true the chart creates the '$CHART_RELEASE-config', if it +# does not exist the user is in charge of managing it (as this file includes a +# sample file you have to add it empty to handle it externally). +configFiles: + config.json: |- + { + "storage": { "rootDirectory": "/var/lib/registry" }, + "http": { "address": "0.0.0.0", "port": "5000" }, + "log": { "level": "debug" } + } +# Alternatively, the configuration can include authentication and accessControl +# data and we can use mountSecret option for the passwords. +# +# config.json: |- +# { +# "storage": { "rootDirectory": "/var/lib/registry" }, +# "http": { +# "address": "0.0.0.0", +# "port": "5000", +# "auth": { "htpasswd": { "path": "/secret/htpasswd" } }, +# "accessControl": { +# "repositories": { +# "**": { +# "policies": [{ +# "users": ["user"], +# "actions": ["read"] +# }], +# "defaultPolicy": [] +# } +# }, +# "adminPolicy": { +# "users": ["admin"], +# "actions": ["read", "create", "update", "delete"] +# } +# } +# }, +# "log": { "level": "debug" } +# } + +# externalSecrets allows to mount external (meaning not managed by this chart) +# Kubernetes secrets within the Zot container. +# The secret is identified by its name (property "secretName") and should be +# present in the same namespace. The property "mountPath" specifies the path +# within the container filesystem where the secret is mounted. 
+# +# Below is an example: +# +# externalSecrets: +# - secretName: "secret1" +# mountPath: "/secrets/s1" +# - secretName: "secret2" +# mountPath: "/secrets/s2" +externalSecrets: [] +# If mountSecret is true, the Secret named $CHART_RELEASE-secret is mounted on +# the pod's '/secret' directory (it is used to keep files with passwords, like +# a `htpasswd` file) +mountSecret: false +# If secretFiles does not exist the user is in charge of managing it, again, if +# you want to manage it the value has to be added empty to avoid using this one +secretFiles: + # Example htpasswd with 'admin:admin' & 'user:user' user:pass pairs + htpasswd: |- + admin:$2y$05$vmiurPmJvHylk78HHFWuruFFVePlit9rZWGA/FbZfTEmNRneGJtha + user:$2y$05$L86zqQDfH5y445dcMlwu6uHv.oXFgT6AiJCwpv3ehr7idc0rI3S2G +# Authentication string for Kubernetes probes, which is needed when `htpasswd` +# authentication is enabled, but the anonymous access policy is not. +# It contains a `user:password` string encoded in base64. The example value is +# from running `echo -n "foo:var" | base64` +# authHeader: "Zm9vOmJhcg==" + +# If persistence is 'true' the service uses a persistentVolumeClaim to mount a +# volume for zot on '/var/lib/registry'; by default the pvc used is named +# '$CHART_RELEASE-pvc', but the name can be changed below +persistence: false +# PVC data, only used if persistence is 'true' +pvc: + # Make the chart create the PVC, this option is used with storageClasses that + # can create volumes dynamically, if that is not the case is better to do it + # manually and set create to false + create: false + # Name of the PVC to use or create if persistence is enabled, if not set the + # value '$CHART_RELEASE-pvc' is used + name: null + # Volume access mode, if using more than one replica we need + accessModes: [ "ReadWriteOnce" ] + # Size of the volume requested + storage: 8Gi + # Name of the storage class to use if it is different than the default one + storageClassName: null +# List of environment variables 
to set on the container +env: # - name: "TEST" + +# value: "ME" +# - name: SECRET_NAME +# valueFrom: +# secretKeyRef: +# name: mysecret +# key: username + +# Extra Volume Mounts +extraVolumeMounts: [] +# - name: data +# mountPath: /var/lib/registry + +# Extra Volumes +extraVolumes: [] +# - name: data +# emptyDir: {} + +# Deployment strategy type +strategy: + type: RollingUpdate +# rollingUpdate: +# maxUnavailable: 25% + +# Extra args to pass to the deployment's container +extraArgs: [] +podAnnotations: {} +podLabels: {} +deploymentAnnotations: {} +priorityClassName: "" +dnsConfig: {} +dnsPolicy: "ClusterFirst" +# Metrics configuration +# NOTE: need enable metric extension in config.json +metrics: + # Start a prometheus exporter + enabled: false + # Prometheus Operator ServiceMonitor configuration + serviceMonitor: + # Start a ServiceMonitor for Prometheus Operator + enabled: false + # Specify the interval at which metrics should be scraped + interval: "30s" + # Specify the path to scrape metrics from + path: "/metrics" + # basicAuth credentials for serviceMonitor + basicAuth: + secretName: basic-auth + usernameKey: username + passwordKey: password +# Test hooks configuration +test: + image: + repository: alpine + tag: "3.18" diff --git a/argo/apps/180-zot/values.yaml b/argo/apps/180-zot/values.yaml new file mode 100644 index 0000000..2a90fc1 --- /dev/null +++ b/argo/apps/180-zot/values.yaml @@ -0,0 +1,70 @@ +# Custom values for the zot Helm chart +# Values placed under the 'zot:' key will be passed to the subchart. 
+zot: + # Use the multi-arch image instead of the AMD64-specific one + image: + # use zot multi-arch image + repository: ghcr.io/project-zot/zot + tag: "v2.1.4" + + # Ensure zot runs on AMD64 nodes (or use ARM64 image instead) + # nodeSelector: + # kubernetes.io/arch: amd64 + + # Enable debug logging and access logs + env: + - name: ZOT_LOG_LEVEL + value: "debug" + # Mount only the config.json key from the secret to the specific file + extraVolumes: + - name: zot-config-volume + secret: + secretName: zot-config + items: + - key: config.json + path: config.json + - name: zot-sync-volume + emptyDir: {} + resources: + requests: + cpu: 128m + memory: 512Mi + limits: + cpu: 1000m + memory: 512Mi + extraVolumeMounts: + - name: zot-config-volume + mountPath: /etc/zot/config.json + subPath: config.json + readOnly: true + - name: zot-sync-volume + mountPath: /tmp/zot-sync + readOnly: false + # Service configuration + service: + type: ClusterIP + port: 5000 + annotations: + # Configure service timeout for large uploads + service.beta.kubernetes.io/timeout: "3600" + + # Configure ingress for zot + ingress: + enabled: true + className: traefik + annotations: + # Use Traefik's websecure entrypoint for HTTPS + traefik.ingress.kubernetes.io/router.entrypoints: websecure + # Enable TLS termination at the router (Traefik handles certs) + traefik.ingress.kubernetes.io/router.tls: "true" + # Increase body size limit and disable buffering + # traefik.ingress.kubernetes.io/router.middlewares: services-zot-registry@kubernetescrd + hosts: + - host: zot.cloudfleet.platform.5ha.re + paths: + - path: / + pathType: Prefix + tls: + - hosts: + - zot.cloudfleet.platform.5ha.re + secretName: zot-tls diff --git a/argo/apps/200-airflow/airflow.yaml b/argo/apps/200-airflow/application.yaml similarity index 79% rename from argo/apps/200-airflow/airflow.yaml rename to argo/apps/200-airflow/application.yaml index 027cffb..bd4322a 100644 --- a/argo/apps/200-airflow/airflow.yaml +++ 
b/argo/apps/200-airflow/application.yaml @@ -4,14 +4,14 @@ metadata: name: airflow namespace: argocd finalizers: - - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer + - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer spec: project: default source: # Source is the Git repository containing this Application manifest and the wrapper chart repoURL: https://github.com/datamindedbe/playground-data-platform-stack.git # Ensure this is your repo URL path: argo/apps/200-airflow # Path to the wrapper chart directory within the Git repo - targetRevision: HEAD # Or your specific branch/tag + targetRevision: cloudfleet # Or your specific branch/tag # Helm configuration for the wrapper chart helm: @@ -25,4 +25,4 @@ spec: prune: true selfHeal: true syncOptions: - - CreateNamespace=true # Ensure the services namespace is created + - CreateNamespace=true # Ensure the services namespace is created diff --git a/argo/apps/200-airflow/templates/certificate.yaml b/argo/apps/200-airflow/templates/certificate.yaml new file mode 100644 index 0000000..4561688 --- /dev/null +++ b/argo/apps/200-airflow/templates/certificate.yaml @@ -0,0 +1,11 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: airflow-cert +spec: + secretName: airflow-tls + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - airflow.cloudfleet.platform.5ha.re diff --git a/argo/apps/200-airflow/values.yaml b/argo/apps/200-airflow/values.yaml index 78e1d31..60cfe7e 100644 --- a/argo/apps/200-airflow/values.yaml +++ b/argo/apps/200-airflow/values.yaml @@ -14,6 +14,13 @@ airflow: webserver: service: type: ClusterIP + livenessProbe: + timeoutSeconds: 60 + initialDelaySeconds: 60 + startupProbe: + timeoutSeconds: 120 + periodSeconds: 20 + failureThreshold: 6 workers: persistence: @@ -32,9 +39,11 @@ airflow: traefik.ingress.kubernetes.io/router.tls: "true" # Backend service uses HTTP (Airflow webserver default) # 
traefik.ingress.kubernetes.io/service.scheme: http # Usually not needed if port is 80/8080 - host: airflow.localhost # Define the hostname + host: airflow.cloudfleet.platform.5ha.re # Define the hostname path: / # Root path - + tls: + enabled: true + secretName: airflow-tls createUserJob: useHelmHooks: false diff --git a/argo/apps/300-vault/vault.yaml b/argo/apps/300-vault/application.yaml similarity index 78% rename from argo/apps/300-vault/vault.yaml rename to argo/apps/300-vault/application.yaml index 5d0c25f..fcdc8a8 100644 --- a/argo/apps/300-vault/vault.yaml +++ b/argo/apps/300-vault/application.yaml @@ -4,14 +4,14 @@ metadata: name: vault namespace: argocd finalizers: - - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer + - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer spec: project: default source: # Source is the Git repository containing this Application manifest and the wrapper chart repoURL: https://github.com/datamindedbe/playground-data-platform-stack.git # Ensure this is your repo URL path: argo/apps/300-vault # Path to the wrapper chart directory within the Git repo - targetRevision: HEAD # Or your specific branch/tag + targetRevision: cloudfleet # Or your specific branch/tag # Helm configuration for the wrapper chart helm: @@ -25,4 +25,4 @@ spec: prune: true selfHeal: true syncOptions: - - CreateNamespace=true # Ensure the services namespace is created (if not already) + - CreateNamespace=true # Ensure the services namespace is created (if not already) diff --git a/argo/apps/300-vault/templates/certificate.yaml b/argo/apps/300-vault/templates/certificate.yaml new file mode 100644 index 0000000..c9acaf6 --- /dev/null +++ b/argo/apps/300-vault/templates/certificate.yaml @@ -0,0 +1,11 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: vault-cert +spec: + secretName: vault-tls + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - 
vault.cloudfleet.platform.5ha.re diff --git a/argo/apps/300-vault/values-defaults.yaml b/argo/apps/300-vault/values-defaults.yaml new file mode 100644 index 0000000..41c59a2 --- /dev/null +++ b/argo/apps/300-vault/values-defaults.yaml @@ -0,0 +1,1351 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +# Available parameters and their default values for the Vault chart. + +global: + # enabled is the master enabled switch. Setting this to true or false + # will enable or disable all the components within this chart by default. + enabled: true + + # The namespace to deploy to. Defaults to the `helm` installation namespace. + namespace: "" + + # Image pull secret to use for registry authentication. + # Alternatively, the value may be specified as an array of strings. + imagePullSecrets: [] + # imagePullSecrets: + # - name: image-pull-secret + + # TLS for end-to-end encrypted transport + tlsDisable: true + + # External vault server address for the injector and CSI provider to use. + # Setting this will disable deployment of a vault server. + externalVaultAddr: "" + + # If deploying to OpenShift + openshift: false + + # Create PodSecurityPolicy for pods + psp: + enable: false + # Annotation for PodSecurityPolicy. + # This is a multi-line templated string map, and can also be set as YAML. + annotations: | + seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default,runtime/default + apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default + seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default + apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default + + serverTelemetry: + # Enable integration with the Prometheus Operator + # See the top level serverTelemetry section below before enabling this feature. + prometheusOperator: false + +injector: + # True if you want to enable vault agent injection. 
+ # @default: global.enabled + enabled: "-" + + replicas: 1 + + # Configures the port the injector should listen on + port: 8080 + + # If multiple replicas are specified, by default a leader will be determined + # so that only one injector attempts to create TLS certificates. + leaderElector: + enabled: true + + # If true, will enable a node exporter metrics endpoint at /metrics. + metrics: + enabled: false + + # Deprecated: Please use global.externalVaultAddr instead. + externalVaultAddr: "" + + # image sets the repo and tag of the vault-k8s image to use for the injector. + image: + repository: "hashicorp/vault-k8s" + tag: "1.6.2" + pullPolicy: IfNotPresent + + # agentImage sets the repo and tag of the Vault image to use for the Vault Agent + # containers. This should be set to the official Vault image. Vault 1.3.1+ is + # required. + agentImage: + repository: "hashicorp/vault" + tag: "1.19.0" + + # The default values for the injected Vault Agent containers. + agentDefaults: + # For more information on configuring resources, see the K8s documentation: + # https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + cpuLimit: "500m" + cpuRequest: "250m" + memLimit: "128Mi" + memRequest: "64Mi" + # ephemeralLimit: "128Mi" + # ephemeralRequest: "64Mi" + + # Default template type for secrets when no custom template is specified. + # Possible values include: "json" and "map". + template: "map" + + # Default values within Agent's template_config stanza. 
+ templateConfig: + exitOnRetryFailure: true + staticSecretRenderInterval: "" + + # Used to define custom livenessProbe settings + livenessProbe: + # When a probe fails, Kubernetes will try failureThreshold times before giving up + failureThreshold: 2 + # Number of seconds after the container has started before probe initiates + initialDelaySeconds: 5 + # How often (in seconds) to perform the probe + periodSeconds: 2 + # Minimum consecutive successes for the probe to be considered successful after having failed + successThreshold: 1 + # Number of seconds after which the probe times out. + timeoutSeconds: 5 + # Used to define custom readinessProbe settings + readinessProbe: + # When a probe fails, Kubernetes will try failureThreshold times before giving up + failureThreshold: 2 + # Number of seconds after the container has started before probe initiates + initialDelaySeconds: 5 + # How often (in seconds) to perform the probe + periodSeconds: 2 + # Minimum consecutive successes for the probe to be considered successful after having failed + successThreshold: 1 + # Number of seconds after which the probe times out. + timeoutSeconds: 5 + # Used to define custom startupProbe settings + startupProbe: + # When a probe fails, Kubernetes will try failureThreshold times before giving up + failureThreshold: 12 + # Number of seconds after the container has started before probe initiates + initialDelaySeconds: 5 + # How often (in seconds) to perform the probe + periodSeconds: 5 + # Minimum consecutive successes for the probe to be considered successful after having failed + successThreshold: 1 + # Number of seconds after which the probe times out. + timeoutSeconds: 5 + + # Mount Path of the Vault Kubernetes Auth Method. + authPath: "auth/kubernetes" + + # Configures the log verbosity of the injector. + # Supported log levels include: trace, debug, info, warn, error + logLevel: "info" + + # Configures the log format of the injector. Supported log formats: "standard", "json". 
+ logFormat: "standard" + + # Configures all Vault Agent sidecars to revoke their token when shutting down + revokeOnShutdown: false + + webhook: + # Configures failurePolicy of the webhook. The "unspecified" default behaviour depends on the + # API Version of the WebHook. + # To block pod creation while the webhook is unavailable, set the policy to `Fail` below. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy + # + failurePolicy: Ignore + + # matchPolicy specifies the approach to accepting changes based on the rules of + # the MutatingWebhookConfiguration. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy + # for more details. + # + matchPolicy: Exact + + # timeoutSeconds is the amount of seconds before the webhook request will be ignored + # or fails. + # If it is ignored or fails depends on the failurePolicy + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#timeouts + # for more details. + # + timeoutSeconds: 30 + + # namespaceSelector is the selector for restricting the webhook to only + # specific namespaces. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector + # for more details. + # Example: + # namespaceSelector: + # matchLabels: + # sidecar-injector: enabled + namespaceSelector: {} + + # objectSelector is the selector for restricting the webhook to only + # specific labels. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector + # for more details. + # Example: + # objectSelector: + # matchLabels: + # vault-sidecar-injector: enabled + objectSelector: | + matchExpressions: + - key: app.kubernetes.io/name + operator: NotIn + values: + - {{ template "vault.name" . 
}}-agent-injector + + # Extra annotations to attach to the webhook + annotations: {} + + # Deprecated: please use 'webhook.failurePolicy' instead + # Configures failurePolicy of the webhook. The "unspecified" default behaviour depends on the + # API Version of the WebHook. + # To block pod creation while webhook is unavailable, set the policy to `Fail` below. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy + # + failurePolicy: Ignore + + # Deprecated: please use 'webhook.namespaceSelector' instead + # namespaceSelector is the selector for restricting the webhook to only + # specific namespaces. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector + # for more details. + # Example: + # namespaceSelector: + # matchLabels: + # sidecar-injector: enabled + namespaceSelector: {} + + # Deprecated: please use 'webhook.objectSelector' instead + # objectSelector is the selector for restricting the webhook to only + # specific labels. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector + # for more details. + # Example: + # objectSelector: + # matchLabels: + # vault-sidecar-injector: enabled + objectSelector: {} + + # Deprecated: please use 'webhook.annotations' instead + # Extra annotations to attach to the webhook + webhookAnnotations: {} + + certs: + # secretName is the name of the secret that has the TLS certificate and + # private key to serve the injector webhook. If this is null, then the + # injector will default to its automatic management mode that will assign + # a service account to the injector to generate its own certificates. + secretName: null + + # caBundle is a base64-encoded PEM-encoded certificate bundle for the CA + # that signed the TLS certificate that the webhook serves. 
This must be set + # if secretName is non-null unless an external service like cert-manager is + # keeping the caBundle updated. + caBundle: "" + + # certName and keyName are the names of the files within the secret for + # the TLS cert and private key, respectively. These have reasonable + # defaults but can be customized if necessary. + certName: tls.crt + keyName: tls.key + + # Security context for the pod template and the injector container + # The default pod securityContext is: + # runAsNonRoot: true + # runAsGroup: {{ .Values.injector.gid | default 1000 }} + # runAsUser: {{ .Values.injector.uid | default 100 }} + # fsGroup: {{ .Values.injector.gid | default 1000 }} + # and for container is + # allowPrivilegeEscalation: false + # capabilities: + # drop: + # - ALL + securityContext: + pod: {} + container: {} + + resources: {} + # resources: + # requests: + # memory: 256Mi + # cpu: 250m + # limits: + # memory: 256Mi + # cpu: 250m + + # extraEnvironmentVars is a list of extra environment variables to set in the + # injector deployment. + extraEnvironmentVars: {} # KUBERNETES_SERVICE_HOST: kubernetes.default.svc + + # Affinity Settings for injector pods + # This can either be a multi-line string or YAML matching the PodSpec's affinity field. + # Commenting out or setting as empty the affinity variable, will allow + # deployment of multiple replicas to single node services such as Minikube. + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/name: {{ template "vault.name" . }}-agent-injector + app.kubernetes.io/instance: "{{ .Release.Name }}" + component: webhook + topologyKey: kubernetes.io/hostname + + # Topology settings for injector pods + # ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + # This should be either a multi-line string or YAML matching the topologySpreadConstraints array + # in a PodSpec. 
+ topologySpreadConstraints: [] + + # Toleration Settings for injector pods + # This should be either a multi-line string or YAML matching the Toleration array + # in a PodSpec. + tolerations: [] + + # nodeSelector labels for server pod assignment, formatted as a multi-line string or YAML map. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: + # beta.kubernetes.io/arch: amd64 + nodeSelector: {} + + # Priority class for injector pods + priorityClassName: "" + + # Extra annotations to attach to the injector pods + # This can either be YAML or a YAML-formatted multi-line templated string map + # of the annotations to apply to the injector pods + annotations: {} + + # Extra labels to attach to the agent-injector + # This should be a YAML map of the labels to apply to the injector + extraLabels: {} + + # Should the injector pods run on the host network (useful when using + # an alternate CNI in EKS) + hostNetwork: false + + # Injector service specific config + service: + # Extra annotations to attach to the injector service + annotations: {} + + # Injector serviceAccount specific config + serviceAccount: + # Extra annotations to attach to the injector serviceAccount + annotations: {} + + # A disruption budget limits the number of pods of a replicated application + # that are down simultaneously from voluntary disruptions + podDisruptionBudget: {} + # podDisruptionBudget: + # maxUnavailable: 1 + + # strategy for updating the deployment. This can be a multi-line string or a + # YAML map. + strategy: {} + # strategy: | + # rollingUpdate: + # maxSurge: 25% + # maxUnavailable: 25% + # type: RollingUpdate + +server: + # If true, or "-" with global.enabled true, Vault server will be installed. + # See vault.mode in _helpers.tpl for implementation details. + enabled: "-" + + # [Enterprise Only] This value refers to a Kubernetes secret that you have + # created that contains your enterprise license. 
If you are not using an + # enterprise image or if you plan to introduce the license key via another + # route, then leave secretName blank ("") or set it to null. + # Requires Vault Enterprise 1.8 or later. + enterpriseLicense: + # The name of the Kubernetes secret that holds the enterprise license. The + # secret must be in the same namespace that Vault is installed into. + secretName: "" + # The key within the Kubernetes secret that holds the enterprise license. + secretKey: "license" + + image: + repository: "hashicorp/vault" + tag: "1.19.0" + # Overrides the default Image Pull Policy + pullPolicy: IfNotPresent + + # Configure the Update Strategy Type for the StatefulSet + # See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + updateStrategyType: "OnDelete" + + # Configure the logging verbosity for the Vault server. + # Supported log levels include: trace, debug, info, warn, error + logLevel: "" + + # Configure the logging format for the Vault server. + # Supported log formats include: standard, json + logFormat: "" + + # Resource requests, limits, etc. for the server cluster placement. This + # should map directly to the value of the resources field for a PodSpec. + # By default no direct resource request is made. + resources: {} + # resources: + # requests: + # memory: 256Mi + # cpu: 250m + # limits: + # memory: 256Mi + # cpu: 250m + + # Ingress allows ingress services to be created to allow external access + # from Kubernetes to access Vault pods. + # If deployment is on OpenShift, the following block is ignored. + # In order to expose the service, use the route section below + ingress: + enabled: false + labels: {} # traffic: external + annotations: {} + # | + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # or + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + + # Optionally use ingressClassName instead of deprecated annotation. 
+ # See: https://kubernetes.io/docs/concepts/services-networking/ingress/#deprecated-annotation + ingressClassName: "" + + # As of Kubernetes 1.19, all Ingress Paths must have a pathType configured. The default value below should be sufficient in most cases. + # See: https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types for other possible values. + pathType: Prefix + + # When HA mode is enabled and K8s service registration is being used, + # configure the ingress to point to the Vault active service. + activeService: true + hosts: + - host: chart-example.local + paths: [] + ## Extra paths to prepend to the host configuration. This is useful when working with annotation based services. + extraPaths: [] + # - path: /* + # backend: + # service: + # name: ssl-redirect + # port: + # number: use-annotation + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + # hostAliases is a list of aliases to be added to /etc/hosts. Specified as a YAML list. + hostAliases: [] + # - ip: 127.0.0.1 + # hostnames: + # - chart-example.local + + # OpenShift only - create a route to expose the service + # By default the created route will be of type passthrough + route: + enabled: false + + # When HA mode is enabled and K8s service registration is being used, + # configure the route to point to the Vault active service. + activeService: true + + labels: {} + annotations: {} + host: chart-example.local + # tls will be passed directly to the route's TLS config, which + # can be used to configure other termination methods that terminate + # TLS at the router + tls: + termination: passthrough + + # authDelegator enables a cluster role binding to be attached to the service + # account. This cluster role binding can be used to setup Kubernetes auth + # method. See https://developer.hashicorp.com/vault/docs/auth/kubernetes + authDelegator: + enabled: true + + # extraInitContainers is a list of init containers. Specified as a YAML list. 
+ # This is useful if you need to run a script to provision TLS certificates or + # write out configuration files in a dynamic way. + extraInitContainers: null + # # This example installs a plugin pulled from github into the /usr/local/libexec/vault/oauthapp folder, + # # which is defined in the volumes value. + # - name: oauthapp + # image: "alpine" + # command: [sh, -c] + # args: + # - cd /tmp && + # wget https://github.com/puppetlabs/vault-plugin-secrets-oauthapp/releases/download/v1.2.0/vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64.tar.xz -O oauthapp.xz && + # tar -xf oauthapp.xz && + # mv vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64 /usr/local/libexec/vault/oauthapp && + # chmod +x /usr/local/libexec/vault/oauthapp + # volumeMounts: + # - name: plugins + # mountPath: /usr/local/libexec/vault + + # extraContainers is a list of sidecar containers. Specified as a YAML list. + extraContainers: null + + # shareProcessNamespace enables process namespace sharing between Vault and the extraContainers + # This is useful if Vault must be signaled, e.g. to send a SIGHUP for a log rotation + shareProcessNamespace: false + + # extraArgs is a string containing additional Vault server arguments. + extraArgs: "" + + # extraPorts is a list of extra ports. Specified as a YAML list. + # This is useful if you need to add additional ports to the statefulset in dynamic way. + extraPorts: null + # - containerPort: 8300 + # name: http-monitoring + + # Used to define custom readinessProbe settings + readinessProbe: + enabled: true + # If you need to use a http path instead of the default exec + # path: /v1/sys/health?standbyok=true + + # Port number on which readinessProbe will be checked. 
+ port: 8200 + # When a probe fails, Kubernetes will try failureThreshold times before giving up + failureThreshold: 2 + # Number of seconds after the container has started before probe initiates + initialDelaySeconds: 5 + # How often (in seconds) to perform the probe + periodSeconds: 5 + # Minimum consecutive successes for the probe to be considered successful after having failed + successThreshold: 1 + # Number of seconds after which the probe times out. + timeoutSeconds: 3 + # Used to enable a livenessProbe for the pods + livenessProbe: + enabled: false + # Used to define a liveness exec command. If provided, exec is preferred to httpGet (path) as the livenessProbe handler. + execCommand: [] + # - /bin/sh + # - -c + # - /vault/userconfig/mylivenessscript/run.sh + # Path for the livenessProbe to use httpGet as the livenessProbe handler + path: "/v1/sys/health?standbyok=true" + # Port number on which livenessProbe will be checked if httpGet is used as the livenessProbe handler + port: 8200 + # When a probe fails, Kubernetes will try failureThreshold times before giving up + failureThreshold: 2 + # Number of seconds after the container has started before probe initiates + initialDelaySeconds: 60 + # How often (in seconds) to perform the probe + periodSeconds: 5 + # Minimum consecutive successes for the probe to be considered successful after having failed + successThreshold: 1 + # Number of seconds after which the probe times out. + timeoutSeconds: 3 + + # Optional duration in seconds the pod needs to terminate gracefully. + # See: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/ + terminationGracePeriodSeconds: 10 + + # Used to set the sleep time during the preStop step, if custom preStop + # commands are not set. + preStopSleepSeconds: 5 + + # Used to define custom preStop exec commands to run before the pod is + # terminated. 
If not set, this will default to: + # preStop: + # - "/bin/sh" + # - "-c" + # - "sleep {{ .Values.server.preStopSleepSeconds }} && kill -SIGTERM $(pidof vault)" + preStop: [] + + # Used to define commands to run after the pod is ready. + # This can be used to automate processes such as initialization + # or boostrapping auth methods. + postStart: [] + # - /bin/sh + # - -c + # - /vault/userconfig/myscript/run.sh + + # extraEnvironmentVars is a list of extra environment variables to set with the stateful set. These could be + # used to include variables required for auto-unseal. + extraEnvironmentVars: {} + # GOOGLE_REGION: global + # GOOGLE_PROJECT: myproject + # GOOGLE_APPLICATION_CREDENTIALS: /vault/userconfig/myproject/myproject-creds.json + + # extraSecretEnvironmentVars is a list of extra environment variables to set with the stateful set. + # These variables take value from existing Secret objects. + extraSecretEnvironmentVars: [] + # - envName: AWS_SECRET_ACCESS_KEY + # secretName: vault + # secretKey: AWS_SECRET_ACCESS_KEY + + # Deprecated: please use 'volumes' instead. + # extraVolumes is a list of extra volumes to mount. These will be exposed + # to Vault in the path `/vault/userconfig//`. The value below is + # an array of objects, examples are shown below. + extraVolumes: [] + # - type: secret (or "configMap") + # name: my-secret + # path: null # default is `/vault/userconfig` + + # volumes is a list of volumes made available to all containers. These are rendered + # via toYaml rather than pre-processed like the extraVolumes value. + # The purpose is to make it easy to share volumes between containers. + volumes: null + # - name: plugins + # emptyDir: {} + + # volumeMounts is a list of volumeMounts for the main server container. These are rendered + # via toYaml rather than pre-processed like the extraVolumes value. + # The purpose is to make it easy to share volumes between containers. 
+ volumeMounts: null + # - mountPath: /usr/local/libexec/vault + # name: plugins + # readOnly: true + + # Affinity Settings + # Commenting out or setting as empty the affinity variable, will allow + # deployment to single node services such as Minikube + # This should be either a multi-line string or YAML matching the PodSpec's affinity field. + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/name: {{ template "vault.name" . }} + app.kubernetes.io/instance: "{{ .Release.Name }}" + component: server + topologyKey: kubernetes.io/hostname + + # Topology settings for server pods + # ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + # This should be either a multi-line string or YAML matching the topologySpreadConstraints array + # in a PodSpec. + topologySpreadConstraints: [] + + # Toleration Settings for server pods + # This should be either a multi-line string or YAML matching the Toleration array + # in a PodSpec. + tolerations: [] + + # nodeSelector labels for server pod assignment, formatted as a multi-line string or YAML map. 
+ # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: + # beta.kubernetes.io/arch: amd64 + nodeSelector: {} + + # Enables network policy for server pods + networkPolicy: + enabled: false + egress: [] + # egress: + # - to: + # - ipBlock: + # cidr: 10.0.0.0/24 + # ports: + # - protocol: TCP + # port: 443 + ingress: + - from: + - namespaceSelector: {} + ports: + - port: 8200 + protocol: TCP + - port: 8201 + protocol: TCP + + # Priority class for server pods + priorityClassName: "" + + # Extra labels to attach to the server pods + # This should be a YAML map of the labels to apply to the server pods + extraLabels: {} + + # Extra annotations to attach to the server pods + # This can either be YAML or a YAML-formatted multi-line templated string map + # of the annotations to apply to the server pods + annotations: {} + + # Add an annotation to the server configmap and the statefulset pods, + # vaultproject.io/config-checksum, that is a hash of the Vault configuration. + # This can be used together with an OnDelete deployment strategy to help + # identify which pods still need to be deleted during a deployment to pick up + # any configuration changes. + includeConfigAnnotation: false + + # Enables a headless service to be used by the Vault Statefulset + service: + enabled: true + # Enable or disable the vault-active service, which selects Vault pods that + # have labeled themselves as the cluster leader with `vault-active: "true"`. + active: + enabled: true + # Extra annotations for the service definition. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the active service. + annotations: {} + # Enable or disable the vault-standby service, which selects Vault pods that + # have labeled themselves as a cluster follower with `vault-active: "false"`. + standby: + enabled: true + # Extra annotations for the service definition. 
This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the standby service. + annotations: {} + # If enabled, the service selectors will include `app.kubernetes.io/instance: {{ .Release.Name }}` + # When disabled, services may select Vault pods not deployed from the chart. + # Does not affect the headless vault-internal service with `ClusterIP: None` + instanceSelector: + enabled: true + # clusterIP controls whether a Cluster IP address is attached to the + # Vault service within Kubernetes. By default, the Vault service will + # be given a Cluster IP address, set to None to disable. When disabled + # Kubernetes will create a "headless" service. Headless services can be + # used to communicate with pods directly through DNS instead of a round-robin + # load balancer. + # clusterIP: None + + # Configures the service type for the main Vault service. Can be ClusterIP + # or NodePort. + #type: ClusterIP + + # The IP family and IP families options are to set the behaviour in a dual-stack environment. + # Omitting these values will let the service fall back to whatever the CNI dictates the defaults + # should be. + # These are only supported for kubernetes versions >=1.23.0 + # + # Configures the service's supported IP family policy, can be either: + # SingleStack: Single-stack service. The control plane allocates a cluster IP for the Service, using the first configured service cluster IP range. + # PreferDualStack: Allocates IPv4 and IPv6 cluster IPs for the Service. + # RequireDualStack: Allocates Service .spec.ClusterIPs from both IPv4 and IPv6 address ranges. + ipFamilyPolicy: "" + + # Sets the families that should be supported and the order in which they should be applied to ClusterIP as well. + # Can be IPv4 and/or IPv6. + ipFamilies: [] + + # Do not wait for pods to be ready before including them in the services' + # targets. 
Does not apply to the headless service, which is used for + # cluster-internal communication. + publishNotReadyAddresses: true + + # The externalTrafficPolicy can be set to either Cluster or Local + # and is only valid for LoadBalancer and NodePort service types. + # The default value is Cluster. + # ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-traffic-policy + externalTrafficPolicy: Cluster + + # If type is set to "NodePort", a specific nodePort value can be configured, + # will be random if left blank. + #nodePort: 30000 + + # When HA mode is enabled + # If type is set to "NodePort", a specific nodePort value can be configured, + # will be random if left blank. + #activeNodePort: 30001 + + # When HA mode is enabled + # If type is set to "NodePort", a specific nodePort value can be configured, + # will be random if left blank. + #standbyNodePort: 30002 + + # Port on which Vault server is listening + port: 8200 + # Target port to which the service should be mapped to + targetPort: 8200 + # Extra annotations for the service definition. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the service. + annotations: {} + + # This configures the Vault Statefulset to create a PVC for data + # storage when using the file or raft backend storage engines. + # See https://developer.hashicorp.com/vault/docs/configuration/storage to know more + dataStorage: + enabled: true + # Size of the PVC created + size: 10Gi + # Location where the PVC will be mounted. + mountPath: "/vault/data" + # Name of the storage class to use. If null it will use the + # configured default Storage Class. 
+ storageClass: null + # Access Mode of the storage device being used for the PVC + accessMode: ReadWriteOnce + # Annotations to apply to the PVC + annotations: {} + # Labels to apply to the PVC + labels: {} + + # Persistent Volume Claim (PVC) retention policy + # ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + # Example: + # persistentVolumeClaimRetentionPolicy: + # whenDeleted: Retain + # whenScaled: Retain + persistentVolumeClaimRetentionPolicy: {} + + # This configures the Vault Statefulset to create a PVC for audit + # logs. Once Vault is deployed, initialized, and unsealed, Vault must + # be configured to use this for audit logs. This will be mounted to + # /vault/audit + # See https://developer.hashicorp.com/vault/docs/audit to know more + auditStorage: + enabled: false + # Size of the PVC created + size: 10Gi + # Location where the PVC will be mounted. + mountPath: "/vault/audit" + # Name of the storage class to use. If null it will use the + # configured default Storage Class. + storageClass: null + # Access Mode of the storage device being used for the PVC + accessMode: ReadWriteOnce + # Annotations to apply to the PVC + annotations: {} + # Labels to apply to the PVC + labels: {} + + # Run Vault in "dev" mode. This requires no further setup, no state management, + # and no initialization. This is useful for experimenting with Vault without + # needing to unseal, store keys, et. al. All data is lost on restart - do not + # use dev mode for anything other than experimenting. + # See https://developer.hashicorp.com/vault/docs/concepts/dev-server to know more + dev: + enabled: false + + # Set VAULT_DEV_ROOT_TOKEN_ID value + devRootToken: "root" + + # Run Vault in "standalone" mode. This is the default mode that will deploy if + # no arguments are given to helm. This requires a PVC for data storage to use + # the "file" backend. 
This mode is not highly available and should not be scaled + # past a single replica. + standalone: + enabled: "-" + + # config is a raw string of default configuration when using a Stateful + # deployment. Default is to use a PersistentVolumeClaim mounted at /vault/data + # and store data there. This is only used when using a Replica count of 1, and + # using a stateful set. Supported formats are HCL and JSON. + + # Note: Configuration files are stored in ConfigMaps so sensitive data + # such as passwords should be either mounted through extraSecretEnvironmentVars + # or through a Kube secret. For more information see: + # https://developer.hashicorp.com/vault/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations + config: |- + ui = true + + listener "tcp" { + tls_disable = 1 + address = "[::]:8200" + cluster_address = "[::]:8201" + # Enable unauthenticated metrics access (necessary for Prometheus Operator) + #telemetry { + # unauthenticated_metrics_access = "true" + #} + } + storage "file" { + path = "/vault/data" + } + + # Example configuration for using auto-unseal, using Google Cloud KMS. The + # GKMS keys must already exist, and the cluster must have a service account + # that is authorized to access GCP KMS. + #seal "gcpckms" { + # project = "vault-helm-dev" + # region = "global" + # key_ring = "vault-helm-unseal-kr" + # crypto_key = "vault-helm-unseal-key" + #} + + # Example configuration for enabling Prometheus metrics in your config. + #telemetry { + # prometheus_retention_time = "30s" + # disable_hostname = true + #} + + # Run Vault in "HA" mode. There are no storage requirements unless the audit log + # persistence is required. In HA mode Vault will configure itself to use Consul + # for its storage backend. The default configuration provided will work the Consul + # Helm project by default. It is possible to manually configure Vault to use a + # different HA backend. 
+ ha: + enabled: false + replicas: 3 + + # Set the api_addr configuration for Vault HA + # See https://developer.hashicorp.com/vault/docs/configuration#api_addr + # If set to null, this will be set to the Pod IP Address + apiAddr: null + + # Set the cluster_addr configuration for Vault HA + # See https://developer.hashicorp.com/vault/docs/configuration#cluster_addr + # If set to null, this will be set to https://$(HOSTNAME).{{ template "vault.fullname" . }}-internal:8201 + clusterAddr: null + + # Enables Vault's integrated Raft storage. Unlike the typical HA modes where + # Vault's persistence is external (such as Consul), enabling Raft mode will create + # persistent volumes for Vault to store data according to the configuration under server.dataStorage. + # The Vault cluster will coordinate leader elections and failovers internally. + raft: + + # Enables Raft integrated storage + enabled: false + # Set the Node Raft ID to the name of the pod + setNodeId: false + + # Note: Configuration files are stored in ConfigMaps so sensitive data + # such as passwords should be either mounted through extraSecretEnvironmentVars + # or through a Kube secret. For more information see: + # https://developer.hashicorp.com/vault/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations + # Supported formats are HCL and JSON. + config: | + ui = true + + listener "tcp" { + tls_disable = 1 + address = "[::]:8200" + cluster_address = "[::]:8201" + # Enable unauthenticated metrics access (necessary for Prometheus Operator) + #telemetry { + # unauthenticated_metrics_access = "true" + #} + } + + storage "raft" { + path = "/vault/data" + } + + service_registration "kubernetes" {} + + # config is a raw string of default configuration when using a Stateful + # deployment. Default is to use a Consul for its HA storage backend. + # Supported formats are HCL and JSON. 
+ + # Note: Configuration files are stored in ConfigMaps so sensitive data + # such as passwords should be either mounted through extraSecretEnvironmentVars + # or through a Kube secret. For more information see: + # https://developer.hashicorp.com/vault/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations + config: | + ui = true + + listener "tcp" { + tls_disable = 1 + address = "[::]:8200" + cluster_address = "[::]:8201" + } + storage "consul" { + path = "vault" + address = "HOST_IP:8500" + } + + service_registration "kubernetes" {} + + # Example configuration for using auto-unseal, using Google Cloud KMS. The + # GKMS keys must already exist, and the cluster must have a service account + # that is authorized to access GCP KMS. + #seal "gcpckms" { + # project = "vault-helm-dev-246514" + # region = "global" + # key_ring = "vault-helm-unseal-kr" + # crypto_key = "vault-helm-unseal-key" + #} + + # Example configuration for enabling Prometheus metrics. + # If you are using Prometheus Operator you can enable a ServiceMonitor resource below. + # You may wish to enable unauthenticated metrics in the listener block above. + #telemetry { + # prometheus_retention_time = "30s" + # disable_hostname = true + #} + + # A disruption budget limits the number of pods of a replicated application + # that are down simultaneously from voluntary disruptions + disruptionBudget: + enabled: true + + # maxUnavailable will default to (n/2)-1 where n is the number of + # replicas. If you'd like a custom value, you can specify an override here. + maxUnavailable: null + + # Definition of the serviceAccount used to run Vault. + # These options are also used when using an external Vault server to validate + # Kubernetes tokens. + serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + # Create a Secret API object to store a non-expiring token for the service account. + # Prior to v1.24.0, Kubernetes used to generate this secret for each service account by default. + # Kubernetes now recommends using short-lived tokens from the TokenRequest API or projected volumes instead if possible. + # For more details, see https://kubernetes.io/docs/concepts/configuration/secret/#service-account-token-secrets + # serviceAccount.create must be equal to 'true' in order to use this feature. + createSecret: false + # Extra annotations for the serviceAccount definition. This can either be + # YAML or a YAML-formatted multi-line templated string map of the + # annotations to apply to the serviceAccount. + annotations: {} + # Extra labels to attach to the serviceAccount + # This should be a YAML map of the labels to apply to the serviceAccount + extraLabels: {} + # Enable or disable a service account role binding with the permissions required for + # Vault's Kubernetes service_registration config option. + # See https://developer.hashicorp.com/vault/docs/configuration/service-registration/kubernetes + serviceDiscovery: + enabled: true + + # Settings for the statefulSet used to run Vault. + statefulSet: + # Extra annotations for the statefulSet. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the statefulSet. + annotations: {} + + # Set the pod and container security contexts. 
+ # If not set, these will default to, and for *not* OpenShift: + # pod: + # runAsNonRoot: true + # runAsGroup: {{ .Values.server.gid | default 1000 }} + # runAsUser: {{ .Values.server.uid | default 100 }} + # fsGroup: {{ .Values.server.gid | default 1000 }} + # container: + # allowPrivilegeEscalation: false + # + # If not set, these will default to, and for OpenShift: + # pod: {} + # container: {} + securityContext: + pod: {} + container: {} + + # Should the server pods run on the host network + hostNetwork: false + +# Vault UI +ui: + # True if you want to create a Service entry for the Vault UI. + # + # serviceType can be used to control the type of service created. For + # example, setting this to "LoadBalancer" will create an external load + # balancer (for supported K8S installations) to access the UI. + enabled: false + publishNotReadyAddresses: true + # The service should only contain selectors for active Vault pod + activeVaultPodOnly: false + serviceType: "ClusterIP" + serviceNodePort: null + externalPort: 8200 + targetPort: 8200 + + # The IP family and IP families options are to set the behaviour in a dual-stack environment. + # Omitting these values will let the service fall back to whatever the CNI dictates the defaults + # should be. + # These are only supported for kubernetes versions >=1.23.0 + # + # Configures the service's supported IP family, can be either: + # SingleStack: Single-stack service. The control plane allocates a cluster IP for the Service, using the first configured service cluster IP range. + # PreferDualStack: Allocates IPv4 and IPv6 cluster IPs for the Service. + # RequireDualStack: Allocates Service .spec.ClusterIPs from both IPv4 and IPv6 address ranges. + serviceIPFamilyPolicy: "" + + # Sets the families that should be supported and the order in which they should be applied to ClusterIP as well + # Can be IPv4 and/or IPv6. 
+ serviceIPFamilies: [] + + # The externalTrafficPolicy can be set to either Cluster or Local + # and is only valid for LoadBalancer and NodePort service types. + # The default value is Cluster. + # ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-traffic-policy + externalTrafficPolicy: Cluster + + #loadBalancerSourceRanges: + # - 10.0.0.0/16 + # - 1.78.23.3/32 + + # loadBalancerIP: + + # Extra annotations to attach to the ui service + # This can either be YAML or a YAML-formatted multi-line templated string map + # of the annotations to apply to the ui service + annotations: {} + +# secrets-store-csi-driver-provider-vault +csi: + # True if you want to install a secrets-store-csi-driver-provider-vault daemonset. + # + # Requires installing the secrets-store-csi-driver separately, see: + # https://github.com/kubernetes-sigs/secrets-store-csi-driver#install-the-secrets-store-csi-driver + # + # With the driver and provider installed, you can mount Vault secrets into volumes + # similar to the Vault Agent injector, and you can also sync those secrets into + # Kubernetes secrets. + enabled: false + + image: + repository: "hashicorp/vault-csi-provider" + tag: "1.5.0" + pullPolicy: IfNotPresent + + # volumes is a list of volumes made available to all containers. These are rendered + # via toYaml rather than pre-processed like the extraVolumes value. + # The purpose is to make it easy to share volumes between containers. + volumes: null + # - name: tls + # secret: + # secretName: vault-tls + + # volumeMounts is a list of volumeMounts for the main server container. These are rendered + # via toYaml rather than pre-processed like the extraVolumes value. + # The purpose is to make it easy to share volumes between containers. 
+ volumeMounts: null + # - name: tls + # mountPath: "/vault/tls" + # readOnly: true + + resources: {} + # resources: + # requests: + # cpu: 50m + # memory: 128Mi + # limits: + # cpu: 50m + # memory: 128Mi + + # Override the default secret name for the CSI Provider's HMAC key used for + # generating secret versions. + hmacSecretName: "" + + # Allow modification of the hostNetwork parameter to avoid the need of a + # dedicated pod ip + hostNetwork: false + + # Settings for the daemonSet used to run the provider. + daemonSet: + updateStrategy: + type: RollingUpdate + maxUnavailable: "" + # Extra annotations for the daemonSet. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the daemonSet. + annotations: {} + # Provider host path (must match the CSI provider's path) + providersDir: "/etc/kubernetes/secrets-store-csi-providers" + # Kubelet host path + kubeletRootDir: "/var/lib/kubelet" + # Extra labels to attach to the vault-csi-provider daemonSet + # This should be a YAML map of the labels to apply to the csi provider daemonSet + extraLabels: {} + # security context for the pod template and container in the csi provider daemonSet + securityContext: + pod: {} + container: {} + + pod: + # Extra annotations for the provider pods. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the pod. + annotations: {} + + # Toleration Settings for provider pods + # This should be either a multi-line string or YAML matching the Toleration array + # in a PodSpec. + tolerations: [] + + # nodeSelector labels for csi pod assignment, formatted as a multi-line string or YAML map. 
+ # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: + # beta.kubernetes.io/arch: amd64 + nodeSelector: {} + + # Affinity Settings + # This should be either a multi-line string or YAML matching the PodSpec's affinity field. + affinity: {} + + # Extra labels to attach to the vault-csi-provider pod + # This should be a YAML map of the labels to apply to the csi provider pod + extraLabels: {} + + agent: + enabled: true + extraArgs: [] + + image: + repository: "hashicorp/vault" + tag: "1.19.0" + pullPolicy: IfNotPresent + + logFormat: standard + logLevel: info + + resources: {} + # resources: + # requests: + # memory: 256Mi + # cpu: 250m + # limits: + # memory: 256Mi + # cpu: 250m + + # Priority class for csi pods + priorityClassName: "" + + serviceAccount: + # Extra annotations for the serviceAccount definition. This can either be + # YAML or a YAML-formatted multi-line templated string map of the + # annotations to apply to the serviceAccount. + annotations: {} + + # Extra labels to attach to the vault-csi-provider serviceAccount + # This should be a YAML map of the labels to apply to the csi provider serviceAccount + extraLabels: {} + + # Used to configure readinessProbe for the pods. + readinessProbe: + # When a probe fails, Kubernetes will try failureThreshold times before giving up + failureThreshold: 2 + # Number of seconds after the container has started before probe initiates + initialDelaySeconds: 5 + # How often (in seconds) to perform the probe + periodSeconds: 5 + # Minimum consecutive successes for the probe to be considered successful after having failed + successThreshold: 1 + # Number of seconds after which the probe times out. + timeoutSeconds: 3 + # Used to configure livenessProbe for the pods. 
+ livenessProbe: + # When a probe fails, Kubernetes will try failureThreshold times before giving up + failureThreshold: 2 + # Number of seconds after the container has started before probe initiates + initialDelaySeconds: 5 + # How often (in seconds) to perform the probe + periodSeconds: 5 + # Minimum consecutive successes for the probe to be considered successful after having failed + successThreshold: 1 + # Number of seconds after which the probe times out. + timeoutSeconds: 3 + + # Configures the log level for the Vault CSI provider. + # Supported log levels include: trace, debug, info, warn, error, and off + logLevel: "info" + + # Deprecated, set logLevel to debug instead. + # If set to true, the logLevel will be set to debug. + debug: false + + # Pass arbitrary additional arguments to vault-csi-provider. + # See https://developer.hashicorp.com/vault/docs/platform/k8s/csi/configurations#command-line-arguments + # for the available command line flags. + extraArgs: [] + +# Vault is able to collect and publish various runtime metrics. +# Enabling this feature requires setting adding `telemetry{}` stanza to +# the Vault configuration. There are a few examples included in the `config` sections above. +# +# For more information see: +# https://developer.hashicorp.com/vault/docs/configuration/telemetry +# https://developer.hashicorp.com/vault/docs/internals/telemetry +serverTelemetry: + # Enable support for the Prometheus Operator. If authorization is not set for authenticating + # to Vault's metrics endpoint, the following Vault server `telemetry{}` config must be included + # in the `listener "tcp"{}` stanza + # telemetry { + # unauthenticated_metrics_access = "true" + # } + # + # See the `standalone.config` for a more complete example of this. 
+ # + # In addition, a top level `telemetry{}` stanza must also be included in the Vault configuration: + # + # example: + # telemetry { + # prometheus_retention_time = "30s" + # disable_hostname = true + # } + # + # Configuration for monitoring the Vault server. + serviceMonitor: + # The Prometheus operator *must* be installed before enabling this feature, + # if not the chart will fail to install due to missing CustomResourceDefinitions + # provided by the operator. + # + # Instructions on how to install the Helm chart can be found here: + # https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack + # More information can be found here: + # https://github.com/prometheus-operator/prometheus-operator + # https://github.com/prometheus-operator/kube-prometheus + + # Enable deployment of the Vault Server ServiceMonitor CustomResource. + enabled: false + + # Selector labels to add to the ServiceMonitor. + # When empty, defaults to: + # release: prometheus + selectors: {} + + # Interval at which Prometheus scrapes metrics + interval: 30s + + # Timeout for Prometheus scrapes + scrapeTimeout: 10s + + # tlsConfig used for scraping the Vault metrics API. + # See API reference: https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.TLSConfig + # example: + # tlsConfig: + # ca: + # secret: + # name: vault-metrics-client + # key: ca.crt + tlsConfig: {} + + # authorization used for scraping the Vault metrics API. + # See API reference: https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.SafeAuthorization + # example: + # authorization: + # credentials: + # name: vault-metrics-client + # key: token + authorization: {} + + prometheusRules: + # The Prometheus operator *must* be installed before enabling this feature, + # if not the chart will fail to install due to missing CustomResourceDefinitions + # provided by the operator. 
+ + # Deploy the PrometheusRule custom resource for AlertManager based alerts. + # Requires that AlertManager is properly deployed. + enabled: false + + # Selector labels to add to the PrometheusRules. + # When empty, defaults to: + # release: prometheus + selectors: {} + + # Some example rules. + rules: [] + # - alert: vault-HighResponseTime + # annotations: + # message: The response time of Vault is over 500ms on average over the last 5 minutes. + # expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 500 + # for: 5m + # labels: + # severity: warning + # - alert: vault-HighResponseTime + # annotations: + # message: The response time of Vault is over 1s on average over the last 5 minutes. + # expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 1000 + # for: 5m + # labels: + # severity: critical diff --git a/argo/apps/300-vault/values.yaml b/argo/apps/300-vault/values.yaml index 963511e..3db8736 100644 --- a/argo/apps/300-vault/values.yaml +++ b/argo/apps/300-vault/values.yaml @@ -5,15 +5,16 @@ vault: # Run Vault in development mode (single node, in-memory storage - NOT FOR PRODUCTION) server: dev: + enabled: false + dataStorage: enabled: true - # Ensure HA mode is disabled when running in dev mode ha: enabled: false # raft: # Raft is the default integrated storage, disable if using dev mode # enabled: false - # Configure Ingress for the UI + # Configure Ingress for the UI ingress: enabled: true # ingressClassName: traefik # Use this if you have a default IngressClass @@ -25,5 +26,6 @@ vault: # Vault UI backend service uses HTTP (port 8200) # traefik.ingress.kubernetes.io/service.scheme: http # Usually not needed hosts: - - host: vault.localhost - + - host: vault.cloudfleet.platform.5ha.re + tls: + - secretName: vault-tls diff --git a/argo/apps/350-external-secrets/Chart.yaml b/argo/apps/350-external-secrets/Chart.yaml new file mode 100644 index 0000000..862b184 --- /dev/null +++ 
b/argo/apps/350-external-secrets/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +name: external-secrets-wrapper +description: A wrapper Helm chart to deploy the External Secrets Operator with custom values. +version: 0.1.0 # Version of this wrapper chart +appVersion: "0.17.0" + +dependencies: + - name: external-secrets + version: "0.17.0" + repository: https://charts.external-secrets.io diff --git a/argo/apps/350-external-secrets/application.yaml b/argo/apps/350-external-secrets/application.yaml new file mode 100644 index 0000000..91675c5 --- /dev/null +++ b/argo/apps/350-external-secrets/application.yaml @@ -0,0 +1,28 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: external-secrets + namespace: argocd + finalizers: + - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer +spec: + project: default + source: + # Source is the Git repository containing this Application manifest and the wrapper chart + repoURL: https://github.com/datamindedbe/playground-data-platform-stack.git # Ensure this is your repo URL + path: argo/apps/350-external-secrets # Path to the wrapper chart directory within the Git repo + targetRevision: cloudfleet # Or your specific branch/tag + + # Helm configuration for the wrapper chart + helm: + releaseName: external-secrets + # Values file is implicitly values.yaml within the source path + destination: + server: https://kubernetes.default.svc + namespace: services # Deploy External Secrets into the services namespace + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true # Ensure the services namespace is created (if not already) diff --git a/argo/apps/350-external-secrets/templates/k8s.yaml b/argo/apps/350-external-secrets/templates/k8s.yaml new file mode 100644 index 0000000..2266047 --- /dev/null +++ b/argo/apps/350-external-secrets/templates/k8s.yaml @@ -0,0 +1,70 @@ +--- +apiVersion: external-secrets.io/v1 +kind: ClusterSecretStore +metadata: + name: k8s-services + namespace: 
services +spec: + provider: + kubernetes: + remoteNamespace: services + server: + url: "https://kubernetes.default.svc.cluster.local" + caProvider: + type: ConfigMap + name: kube-root-ca.crt + key: ca.crt + namespace: services + auth: + serviceAccount: + name: "eso-k8s-services" + namespace: services + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: eso-k8s-services + namespace: services + annotations: + argocd.argoproj.io/sync-wave: "2" + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: eso-k8s-services + namespace: services + annotations: + argocd.argoproj.io/sync-wave: "2" +rules: +- apiGroups: [""] + resources: + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - authorization.k8s.io + resources: + - selfsubjectrulesreviews + verbs: + - create + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: eso-k8s-services + namespace: services + annotations: + argocd.argoproj.io/sync-wave: "2" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: eso-k8s-services +subjects: +- kind: ServiceAccount + name: eso-k8s-services + namespace: services diff --git a/argo/apps/350-external-secrets/templates/vault-sa.yaml b/argo/apps/350-external-secrets/templates/vault-sa.yaml new file mode 100644 index 0000000..34aadff --- /dev/null +++ b/argo/apps/350-external-secrets/templates/vault-sa.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: eso-vault-sa +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: role-tokenreview-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: eso-vault-sa + namespace: services diff --git a/argo/apps/350-external-secrets/templates/vault-store.yaml b/argo/apps/350-external-secrets/templates/vault-store.yaml new file mode 100644 index 0000000..54b7c1d --- /dev/null +++ 
b/argo/apps/350-external-secrets/templates/vault-store.yaml @@ -0,0 +1,27 @@ +apiVersion: external-secrets.io/v1 +kind: ClusterSecretStore +metadata: + name: vault-backend +spec: + provider: + vault: + server: "https://vault.cloudfleet.platform.5ha.re" + path: "secrets" + version: "v2" + auth: + # Authenticate against Vault using a Kubernetes ServiceAccount + # token stored in a Secret. + # https://www.vaultproject.io/docs/auth/kubernetes + tokenSecretRef: + name: "eso-vault-secret" + key: "token" + namespace: "services" + # kubernetes: + # # Path where the Kubernetes authentication backend is mounted in Vault + # mountPath: "kubernetes" + # # A required field containing the Vault Role to assume. + # role: "eso-vault-role" + # Optional service account field containing the name + # of a kubernetes ServiceAccount + # serviceAccountRef: + # name: "eso-vault-sa" diff --git a/argo/apps/350-external-secrets/values.yaml b/argo/apps/350-external-secrets/values.yaml new file mode 100644 index 0000000..1781578 --- /dev/null +++ b/argo/apps/350-external-secrets/values.yaml @@ -0,0 +1,9 @@ +# Values for the vault-wrapper chart + +# Values passed to the 'vault' subchart (dependency) +external-secrets: + serviceAccount: + name: external-secrets + extraEnv: + - name: VAULT_SKIP_VERIFY + value: "true" diff --git a/argo/apps/430-crunchy-postgres-operator/Chart.yaml b/argo/apps/430-crunchy-postgres-operator/Chart.yaml new file mode 100644 index 0000000..f5a2743 --- /dev/null +++ b/argo/apps/430-crunchy-postgres-operator/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v2 +name: crunchy-postgres-operator-wrapper +description: A wrapper Helm chart to deploy Crunchy Postgres Operator with custom values. 
+version: 0.1.0 # Version of this wrapper chart +appVersion: "5.8.2" # Corresponds to the Crunchy Postgres Operator chart version we depend on + +dependencies: +- name: pgo + version: "5.8.2" # The version of the Crunchy Postgres Operator chart to use + repository: oci://registry.developers.crunchydata.com/crunchydata # The OCI repository of the dependency + # We need to map the values from our local values.yaml to the subchart. + # By default, values under a key matching the dependency name are passed. + # So, values for 'pgo' in our values.yaml will go to the pgo subchart. diff --git a/argo/apps/430-crunchy-postgres-operator/application.yaml b/argo/apps/430-crunchy-postgres-operator/application.yaml new file mode 100644 index 0000000..3de3c30 --- /dev/null +++ b/argo/apps/430-crunchy-postgres-operator/application.yaml @@ -0,0 +1,32 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: crunchy-postgres-operator + namespace: argocd + # Add finalizer to ensure that Helm release is deleted before the app + finalizers: + - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer +spec: + project: default + source: + # Source is the Git repository containing this Application manifest and the wrapper chart + repoURL: https://github.com/datamindedbe/playground-data-platform-stack.git + path: argo/apps/430-crunchy-postgres-operator # Path to the wrapper chart directory within the Git repo + targetRevision: cloudfleet # Or your specific branch/tag + + # Helm configuration for the wrapper chart + helm: + # releaseName is optional here, defaults based on app name + releaseName: crunchy-postgres-operator + # Values file is implicitly values.yaml within the source path + # valueFiles: # Not needed if using default values.yaml + # - values.yaml + destination: + server: https://kubernetes.default.svc + namespace: postgres-operator # Deploy Crunchy Postgres Operator into its own namespace + syncPolicy: + automated: + prune: true + selfHeal: 
true + syncOptions: + - CreateNamespace=true # Ensure the postgres-operator namespace is created diff --git a/argo/apps/430-crunchy-postgres-operator/values.yaml b/argo/apps/430-crunchy-postgres-operator/values.yaml new file mode 100644 index 0000000..46a63e3 --- /dev/null +++ b/argo/apps/430-crunchy-postgres-operator/values.yaml @@ -0,0 +1,20 @@ +# Values for the crunchy-postgres-operator-wrapper chart + +# Values passed to the 'pgo' subchart (dependency) +pgo: + # Enable debug logging by default + debug: true + + # Single namespace mode (set to false for multi-namespace) + singleNamespace: false + + # Controller configuration + # controllerImages: + # cluster: registry.developers.crunchydata.com/crunchydata/postgres-operator:ubi9-5.8.2-0 + + # High availability configuration + replicas: 1 + # pgoControllerLeaseName: "cpk-leader-election-lease" + + # Disable upgrade checks + disable_check_for_upgrades: true diff --git a/argo/apps/440-crunchy-postgres/Chart.yaml b/argo/apps/440-crunchy-postgres/Chart.yaml new file mode 100644 index 0000000..af0a32b --- /dev/null +++ b/argo/apps/440-crunchy-postgres/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v2 +name: crunchy-postgres-cluster +description: A Helm chart to deploy a PostgreSQL cluster using Crunchy PostgresCluster custom resources. 
+version: 0.1.0 # Version of this chart +appVersion: "16" # PostgreSQL version we're deploying +type: application + +# This chart doesn't have dependencies as it creates custom resources directly +# It requires the Crunchy Postgres Operator to be installed separately diff --git a/argo/apps/440-crunchy-postgres/application.yaml b/argo/apps/440-crunchy-postgres/application.yaml new file mode 100644 index 0000000..03563be --- /dev/null +++ b/argo/apps/440-crunchy-postgres/application.yaml @@ -0,0 +1,32 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: crunchy-postgres + namespace: argocd + # Add finalizer to ensure that Helm release is deleted before the app + finalizers: + - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer +spec: + project: default + source: + # Source is the Git repository containing this Application manifest and the Helm chart + repoURL: https://github.com/datamindedbe/playground-data-platform-stack.git + path: argo/apps/440-crunchy-postgres # Path to the Helm chart directory within the Git repo + targetRevision: cloudfleet # Or your specific branch/tag + + # Helm configuration for the chart + helm: + # releaseName is optional here, defaults based on app name + releaseName: crunchy-postgres + # Values file is implicitly values.yaml within the source path + # valueFiles: # Not needed if using default values.yaml + # - values.yaml + destination: + server: https://kubernetes.default.svc + namespace: services # Deploy PostgresCluster into the services namespace + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true # Ensure the services namespace is created diff --git a/argo/apps/440-crunchy-postgres/templates/postgres.yaml b/argo/apps/440-crunchy-postgres/templates/postgres.yaml new file mode 100644 index 0000000..41db047 --- /dev/null +++ b/argo/apps/440-crunchy-postgres/templates/postgres.yaml @@ -0,0 +1,45 @@ +apiVersion: 
postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: my-cute-postgres-cluster + namespace: services +spec: + postgresVersion: 17 + + authentication: + rules: + - hba: "host all all all md5" + # - connection: host + # databases: # all + # users: + # hba: "host all all all md5" + + instances: + - name: instance1 + replicas: 1 + dataVolumeClaimSpec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + + resources: + requests: + cpu: 1000m + memory: 512Mi + limits: + cpu: 2000m + memory: 2Gi + + users: + - name: admin + options: "SUPERUSER" + - name: trino + databases: + - trino-catalog + options: "CREATEDB" + - name: zitadel + databases: + - zitadel + options: "CREATEDB CREATEROLE" diff --git a/argo/apps/440-crunchy-postgres/values.yaml b/argo/apps/440-crunchy-postgres/values.yaml new file mode 100644 index 0000000..a6e670f --- /dev/null +++ b/argo/apps/440-crunchy-postgres/values.yaml @@ -0,0 +1,4 @@ +# Minimal values for the crunchy-postgres-cluster chart + +postgresCluster: + name: trino-catalog diff --git a/argo/apps/440-postgres/Chart.yaml b/argo/apps/440-postgres/Chart.yaml new file mode 100644 index 0000000..793613a --- /dev/null +++ b/argo/apps/440-postgres/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v2 +name: postgres-wrapper +description: A wrapper Helm chart to deploy Postgres with custom values. +version: 0.1.0 # Version of this wrapper chart +appVersion: "v25.0.0" # Corresponds to the Postgres chart version we depend on + +dependencies: + - name: postgres + version: "16.6.3" # The version of the Postgres chart to use + repository: oci://registry-1.docker.io/bitnamicharts/postgresql # The repository of the dependency + # We need to map the values from our local values.yaml to the subchart. + # By default, values under a key matching the dependency name are passed. + # So, values for 'postgres' in our values.yaml will go to the postgres subchart. 
diff --git a/argo/apps/440-postgres/application-backup.yaml b/argo/apps/440-postgres/application-backup.yaml new file mode 100644 index 0000000..2985126 --- /dev/null +++ b/argo/apps/440-postgres/application-backup.yaml @@ -0,0 +1,32 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: postgres + namespace: argocd + # Add finalizer to ensure that Helm release is deleted before the app + finalizers: + - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer +spec: + project: default + source: + # Source is the Git repository containing this Application manifest and the wrapper chart + repoURL: https://github.com/datamindedbe/playground-data-platform-stack.git + path: argo/apps/440-postgres # Path to the wrapper chart directory within the Git repo + targetRevision: cloudfleet # Or your specific branch/tag + + # Helm configuration for the wrapper chart + helm: + # releaseName is optional here, defaults based on app name + releaseName: postgres + # Values file is implicitly values.yaml within the source path + # valueFiles: # Not needed if using default values.yaml + # - values.yaml + destination: + server: https://kubernetes.default.svc + namespace: services # Deploy Postgres into the services namespace + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true # Ensure the services namespace is created diff --git a/argo/apps/440-postgres/values.yaml b/argo/apps/440-postgres/values.yaml new file mode 100644 index 0000000..b5047ee --- /dev/null +++ b/argo/apps/440-postgres/values.yaml @@ -0,0 +1,11 @@ +# Values for the postgres-wrapper chart + +# Values passed to the 'postgres' subchart (dependency) +postgres: + image: + repository: docker.io/bitnami/postgresql + tag: 16.1.0-debian-11-r15 + primary: + name: catalog + auth: + postgresPassword: "testadminpassword" diff --git a/argo/apps/450-lakekeeper/lakekeeper.yaml b/argo/apps/450-lakekeeper/application-backup.yaml similarity index 97% rename from 
argo/apps/450-lakekeeper/lakekeeper.yaml rename to argo/apps/450-lakekeeper/application-backup.yaml index 98da629..41fb461 100644 --- a/argo/apps/450-lakekeeper/lakekeeper.yaml +++ b/argo/apps/450-lakekeeper/application-backup.yaml @@ -28,4 +28,4 @@ spec: prune: true selfHeal: true syncOptions: - - CreateNamespace=true + - CreateNamespace=true diff --git a/argo/apps/450-lakekeeper/values.yaml b/argo/apps/450-lakekeeper/values.yaml index a9f9d1c..6aa2fab 100644 --- a/argo/apps/450-lakekeeper/values.yaml +++ b/argo/apps/450-lakekeeper/values.yaml @@ -4,8 +4,8 @@ lakekeeper: externalDatabase: type: postgresql - host: + host: port: 5432 user: lakekeeper password: lakekeeper_password # Use a secure method to manage this in production - database: lakekeeper \ No newline at end of file + database: lakekeeper diff --git a/argo/apps/500-trino/trino.yaml b/argo/apps/500-trino/application.yaml similarity index 81% rename from argo/apps/500-trino/trino.yaml rename to argo/apps/500-trino/application.yaml index 65cbafb..de881c7 100644 --- a/argo/apps/500-trino/trino.yaml +++ b/argo/apps/500-trino/application.yaml @@ -5,15 +5,14 @@ metadata: namespace: argocd # Add finalizer to ensure that Helm release is deleted before the app finalizers: - - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer + - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer spec: project: default source: # Source is the Git repository containing this Application manifest and the wrapper chart repoURL: https://github.com/datamindedbe/playground-data-platform-stack.git # Assuming the same repo path: argo/apps/500-trino # Path to the Trino wrapper chart directory - targetRevision: HEAD # Or your specific branch/tag - + targetRevision: cloudfleet # Or your specific branch/tag # Helm configuration for the wrapper chart helm: releaseName: trino # Helm release name @@ -28,4 +27,4 @@ spec: prune: true selfHeal: true syncOptions: - - CreateNamespace=true # Ensure the trino 
namespace is created + - CreateNamespace=true # Ensure the trino namespace is created diff --git a/argo/apps/500-trino/default-values.yaml b/argo/apps/500-trino/default-values.yaml new file mode 100644 index 0000000..03f59fb --- /dev/null +++ b/argo/apps/500-trino/default-values.yaml @@ -0,0 +1,1170 @@ +## Below is not used, just for reference +######## +###--- + +# Default values for trino. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Override resource names to avoid name conflicts when deploying multiple +# releases in the same namespace. +# @raw +# Example: +# ```yaml +# coordinatorNameOverride: trino-coordinator-adhoc +# workerNameOverride: trino-worker-adhoc +# nameOverride: trino-adhoc +# ``` +nameOverride: +coordinatorNameOverride: +workerNameOverride: + +image: + # -- Image registry, defaults to empty, which results in DockerHub usage + registry: "" + # -- Repository location of the Trino image, typically `organization/imagename` + repository: trinodb/trino + # -- Image tag, defaults to the Trino release version specified as `appVersion` from Chart.yaml + tag: "" + # -- Optional digest value of the image specified as `sha256:abcd...`. A specified value overrides `tag`. + digest: "" + # -- When true, only the content in `repository` is used as image reference + useRepositoryAsSoleImageReference: false + pullPolicy: IfNotPresent + +# -- An optional list of references to secrets in the same namespace to use for pulling images. +# @raw +# Example: +# ```yaml +# imagePullSecrets: +# - name: registry-credentials +# ``` +imagePullSecrets: [] + +server: + workers: 2 + node: + # server.node.environment -- Supports templating with `tpl`. 
+ environment: production + dataDir: /data/trino + pluginDir: /usr/lib/trino/plugin + log: + trino: + level: INFO + config: + path: /etc/trino + https: + enabled: false + port: 8443 + keystore: + path: "" + # -- Trino supports multiple [authentication + # types](https://trino.io/docs/current/security/authentication-types.html): + # PASSWORD, CERTIFICATE, OAUTH2, JWT, KERBEROS. + authenticationType: "" + query: + maxMemory: "4GB" + exchangeManager: {} + # server.exchangeManager -- Mandatory [exchange manager + # configuration](https://trino.io/docs/current/admin/fault-tolerant-execution.html#id1). + # Used to set the name and location(s) of spooling data storage. For multiple destinations use a list or a comma separated URI locations. + # To enable fault-tolerant execution, set the `retry-policy` property in `additionalConfigProperties`. + # Additional exchange manager configurations can be added to `additionalExchangeManagerProperties`. + # @raw + # Example: + # ```yaml + # server: + # exchangeManager: + # name: "filesystem" + # baseDir: + # - "/tmp/trino-local-file-system-exchange-manager" + # additionalConfigProperties: + # - retry-policy=TASK + # additionalExchangeManagerProperties: + # - exchange.sink-buffer-pool-min-size=10 + # - exchange.sink-buffers-per-partition=2 + # - exchange.source-concurrent-readers=4 + # ``` + + workerExtraConfig: "" + coordinatorExtraConfig: "" + # server.autoscaling -- Configure [Horizontal Pod Autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) + # for workers (`server.keda.enabled` must be `false`). + autoscaling: + enabled: false + maxReplicas: 5 + # -- Target average CPU utilization, represented as a percentage of requested CPU. To disable scaling based on CPU, + # set to an empty string. + targetCPUUtilizationPercentage: 50 + # -- Target average memory utilization, represented as a percentage of requested memory. To disable scaling + # based on memory, set to an empty string. 
+ targetMemoryUtilizationPercentage: 80 + behavior: {} + # server.autoscaling.behavior -- Configuration for scaling up and down. + # @raw + # Example: + # ```yaml + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Percent + # value: 100 + # periodSeconds: 15 + # scaleUp: + # stabilizationWindowSeconds: 0 + # policies: + # - type: Percent + # value: 100 + # periodSeconds: 15 + # - type: Pods + # value: 4 + # periodSeconds: 15 + # selectPolicy: Max + # ``` + + # server.keda -- Configure [Kubernetes Event-driven Autoscaling](https://keda.sh/) for workers + # (`server.autoscaling.enabled` must be `false`). + keda: + enabled: false + pollingInterval: 30 + # -- Period (in seconds) to wait after the last trigger reported active before scaling the resource back to 0 + cooldownPeriod: 300 + # -- The delay (in seconds) before the `cooldownPeriod` starts after the initial creation of the `ScaledObject`. + initialCooldownPeriod: 0 + # -- Minimum number of replicas KEDA will scale the resource down to. + # By default, it’s scale to zero, but you can use it with some other value as well. + minReplicaCount: 0 + # -- This setting is passed to the HPA definition that KEDA will create for a given resource and + # holds the maximum number of replicas of the target resource. + maxReplicaCount: 5 + fallback: {} + # server.keda.fallback -- Defines a number of replicas to fall back to if a scaler is in an error state. + # @raw + # Example: + # ```yaml + # fallback: # Optional. 
Section to specify fallback options + # failureThreshold: 3 # Mandatory if fallback section is included + # replicas: 6 # Mandatory if fallback section is included + # ``` + advanced: {} + # server.keda.advanced -- Specifies HPA related options + # @raw + # Example: + # ```yaml + # advanced: + # horizontalPodAutoscalerConfig: + # behavior: + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Percent + # value: 100 + # periodSeconds: 15 + # ``` + triggers: [] + # server.keda.triggers -- List of triggers to activate scaling of the target resource + # @raw + # Example: + # ```yaml + # triggers: + # - type: prometheus + # metricType: Value + # metadata: + # serverAddress: "http://prometheus.example.com" + # threshold: "1" + # metricName: required_workers + # query: >- + # sum by (service) + # (avg_over_time(trino_execution_ClusterSizeMonitor_RequiredWorkers{service={{ include "trino.fullname" . | quote }}}[5s])) + # ``` + annotations: {} + # server.keda.annotations -- Annotations to apply to the ScaledObject CRD. + # @raw + # Example: + # ```yaml + # annotations: + # autoscaling.keda.sh/paused-replicas: "0" + # autoscaling.keda.sh/paused: "true" + # ``` + +accessControl: {} +# accessControl -- [System access +# control](https://trino.io/docs/current/security/built-in-system-access-control.html) +# configuration. +# @raw +# Set the type property to either: +# * `configmap`, and provide the rule file contents in `rules`, +# * `properties`, and provide configuration properties in `properties`. 
+# Properties example: +# ```yaml +# type: properties +# properties: | +# access-control.name=custom-access-control +# access-control.custom_key=custom_value +# ``` +# Config map example: +# ```yaml +# type: configmap +# refreshPeriod: 60s +# # Rules file is mounted to /etc/trino/access-control +# configFile: "rules.json" +# rules: +# rules.json: |- +# { +# "catalogs": [ +# { +# "user": "admin", +# "catalog": "(mysql|system)", +# "allow": "all" +# }, +# { +# "group": "finance|human_resources", +# "catalog": "postgres", +# "allow": true +# }, +# { +# "catalog": "hive", +# "allow": "all" +# }, +# { +# "user": "alice", +# "catalog": "postgresql", +# "allow": "read-only" +# }, +# { +# "catalog": "system", +# "allow": "none" +# } +# ], +# "schemas": [ +# { +# "user": "admin", +# "schema": ".*", +# "owner": true +# }, +# { +# "user": "guest", +# "owner": false +# }, +# { +# "catalog": "default", +# "schema": "default", +# "owner": true +# } +# ] +# } +# ``` + +resourceGroups: {} +# resourceGroups -- [Resource groups control](https://trino.io/docs/current/admin/resource-groups.html) +# @raw +# Set the type property to either: +# * `configmap`, and provide the Resource groups file contents in `resourceGroupsConfig`, +# * `properties`, and provide configuration properties in `properties`. 
+# Properties example: +# ```yaml +# type: properties +# properties: | +# resource-groups.configuration-manager=db +# resource-groups.config-db-url=jdbc:postgresql://trino-postgresql.postgresql.svc.cluster.local:3306/resource_groups +# resource-groups.config-db-user=username +# resource-groups.config-db-password=password +# ``` +# Config map example: +# ```yaml +# type: configmap +# # Resource groups file is mounted to /etc/trino/resource-groups/resource-groups.json +# resourceGroupsConfig: |- +# { +# "rootGroups": [ +# { +# "name": "global", +# "softMemoryLimit": "80%", +# "hardConcurrencyLimit": 100, +# "maxQueued": 100, +# "schedulingPolicy": "fair", +# "jmxExport": true, +# "subGroups": [ +# { +# "name": "admin", +# "softMemoryLimit": "30%", +# "hardConcurrencyLimit": 20, +# "maxQueued": 10 +# }, +# { +# "name": "finance_human_resources", +# "softMemoryLimit": "20%", +# "hardConcurrencyLimit": 15, +# "maxQueued": 10 +# }, +# { +# "name": "general", +# "softMemoryLimit": "30%", +# "hardConcurrencyLimit": 20, +# "maxQueued": 10 +# }, +# { +# "name": "readonly", +# "softMemoryLimit": "10%", +# "hardConcurrencyLimit": 5, +# "maxQueued": 5 +# } +# ] +# } +# ], +# "selectors": [ +# { +# "user": "admin", +# "group": "global.admin" +# }, +# { +# "group": "finance|human_resources", +# "group": "global.finance_human_resources" +# }, +# { +# "user": "alice", +# "group": "global.readonly" +# }, +# { +# "group": "global.general" +# } +# ] +# } +# ``` + +additionalNodeProperties: [] +# additionalNodeProperties -- [Additional node +# properties](https://trino.io/docs/current/installation/deployment.html#log-levels). +# @raw +# Example, assuming the NODE_ID environment variable has been set: +# ```yaml +# - node.id=${NODE_ID} +# ``` + +additionalConfigProperties: [] +# additionalConfigProperties -- [Additional config +# properties](https://trino.io/docs/current/admin/properties.html). 
+# @raw +# Example: +# ```yaml +# - internal-communication.shared-secret=random-value-999 +# - http-server.process-forwarded=true +# ``` + +additionalLogProperties: [] +# additionalLogProperties -- [Additional log +# properties](https://trino.io/docs/current/installation/deployment.html#log-levels). +# @raw +# Example: +# ```yaml +# - io.airlift=DEBUG +# ``` + +additionalExchangeManagerProperties: [] +# additionalExchangeManagerProperties -- [Exchange manager +# properties](https://trino.io/docs/current/admin/fault-tolerant-execution.html#exchange-manager). +# @raw +# Example: +# ```yaml +# - exchange.s3.region=object-store-region +# - exchange.s3.endpoint=your-object-store-endpoint +# - exchange.s3.aws-access-key=your-access-key +# - exchange.s3.aws-secret-key=your-secret-key +# ``` + +eventListenerProperties: [] +# eventListenerProperties -- [Event +# listener](https://trino.io/docs/current/develop/event-listener.html#event-listener) +# properties. To configure multiple event listeners, add them in +# `coordinator.additionalConfigFiles` and `worker.additionalConfigFiles`, and +# set the `event-listener.config-files` property in +# `additionalConfigProperties` to their locations. +# @raw +# Example: +# ```yaml +# - event-listener.name=custom-event-listener +# - custom-property1=custom-value1 +# - custom-property2=custom-value2 +# ``` + +catalogs: + tpch: | + connector.name=tpch + tpch.splits-per-node=4 + tpcds: | + connector.name=tpcds + tpcds.splits-per-node=4 +# catalogs -- Configure +# [catalogs](https://trino.io/docs/current/installation/deployment.html#catalog-properties). +# @raw +# Example: +# ```yaml +# objectstore: | +# connector.name=iceberg +# iceberg.catalog.type=glue +# jmx: | +# connector.name=memory +# memory: | +# connector.name=memory +# memory.max-data-per-node=128MB +# ``` + +additionalCatalogs: {} +# additionalCatalogs -- Deprecated, use `catalogs` instead. 
Configure additional +# [catalogs](https://trino.io/docs/current/installation/deployment.html#catalog-properties). + +env: [] +# env -- additional environment variables added to every pod, specified as a list with explicit values +# @raw +# Example: +# ```yaml +# - name: NAME +# value: "value" +# ``` + +envFrom: [] +# envFrom -- additional environment variables added to every pod, specified as a list of either `ConfigMap` +# or `Secret` references +# @raw +# Example: +# ```yaml +# - secretRef: +# name: extra-secret +# ``` + +initContainers: {} +# initContainers -- Additional [containers that run to +# completion](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) +# during pod initialization. +# @raw +# Example: +# ```yaml +# coordinator: +# - name: init-coordinator +# image: busybox:1.28 +# imagePullPolicy: IfNotPresent +# command: ['sh', '-c', "until nslookup myservice.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done"] +# worker: +# - name: init-worker +# image: busybox:1.28 +# command: ['sh', '-c', 'echo The worker is running! && sleep 3600'] +# ``` + +sidecarContainers: {} +# sidecarContainers -- Additional [containers that starts +# before](https://kubernetes.io/docs/concepts/workloads/pods/sidecar-containers/) +# the Trino container and continues to run. +# @raw +# Example: +# ```yaml +# coordinator: +# - name: side-coordinator +# image: busybox:1.28 +# imagePullPolicy: IfNotPresent +# command: ['sleep', '1'] +# worker: +# - name: side-worker +# image: busybox:1.28 +# imagePullPolicy: IfNotPresent +# command: ['sleep', '1'] +# ``` + +# -- [Pod security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) configuration. +# To remove the default, set it to null (or `~`). 
+securityContext: + runAsUser: 1000 + runAsGroup: 1000 + +# -- [Container security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container) configuration. +containerSecurityContext: + # -- Control whether a process can gain more privileges than its parent process. + allowPrivilegeEscalation: false + capabilities: + # -- A list of the Linux kernel capabilities that are dropped from every container. Valid values are listed in + # [the capabilities manual page](https://man7.org/linux/man-pages/man7/capabilities.7.html). Ensure # to remove + # the "CAP_" prefix which the kernel attaches to the names of permissions. + drop: + - ALL + +shareProcessNamespace: + coordinator: false + worker: false + +service: + annotations: {} + type: ClusterIP + port: 8080 + # service.nodePort -- The port the service listens on the host, for the `NodePort` type. If not set, Kubernetes will + # [allocate a port + # automatically](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport-custom-port). + nodePort: "" + +auth: {} +# auth -- Available authentication methods. +# @raw +# Use username and password provided as a [password file](https://trino.io/docs/current/security/password-file.html#file-format): +# ```yaml +# passwordAuth: "username:encrypted-password-with-htpasswd" +# ``` +# Set the name of a secret containing this file in the password.db key +# ```yaml +# passwordAuthSecret: "trino-password-authentication" +# ``` +# Additionally, set [users' groups](https://trino.io/docs/current/security/group-file.html#file-format): +# ```yaml +# refreshPeriod: 5s +# groups: "group_name:user_1,user_2,user_3" +# ``` +# Set the name of a secret containing this file in the group.db key +# ```yaml +# groupAuthSecret: "trino-group-authentication" +# ``` + +serviceAccount: + # -- Specifies whether a service account should be created + create: false + # -- The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + # -- Annotations to add to the service account + annotations: {} + +configMounts: [] +# configMounts -- Allows mounting additional Trino configuration files from +# Kubernetes config maps on all nodes. +# @raw +# Example: +# ```yaml +# - name: sample-config-mount +# configMap: sample-config-map +# path: /config-map/sample.json +# subPath: sample.json +# ``` + +secretMounts: [] +# secretMounts -- Allows mounting additional Trino configuration files from +# Kubernetes secrets on all nodes. +# @raw +# Example: +# ```yaml +# - name: sample-secret +# secretName: sample-secret +# path: /secrets/sample.json +# subPath: sample.json +# ``` + +coordinator: + deployment: + annotations: {} + progressDeadlineSeconds: 600 + # coordinator.deployment.progressDeadlineSeconds -- The maximum time in seconds for a deployment to make progress before it + # is considered failed. The deployment controller continues to + # process failed deployments and a condition with a ProgressDeadlineExceeded + # reason is surfaced in the deployment status. + + revisionHistoryLimit: 10 + # coordinator.deployment.revisionHistoryLimit -- The number of old ReplicaSets to retain to allow rollback. + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + # coordinator.deployment.strategy -- The deployment strategy to use to replace existing pods with new ones. + + jvm: + maxHeapSize: "8G" + gcMethod: + type: "UseG1GC" + g1: + heapRegionSize: "32M" + + config: + memory: + heapHeadroomPerNode: "" + nodeScheduler: + includeCoordinator: false + # coordinator.config.nodeScheduler.includeCoordinator -- Allows scheduling work on the coordinator so that a + # single machine can function as both coordinator and worker. 
For large clusters, processing work on the + # coordinator can negatively impact query performance because the machine's resources are not available for the + # critical coordinator tasks of scheduling, managing, and monitoring query execution. + query: + maxMemoryPerNode: "1GB" + + additionalJVMConfig: [] + + additionalExposedPorts: {} + # coordinator.additionalExposedPorts -- Additional ports configured in the coordinator container and the service. + # @raw + # Example: + # ```yaml + # https: + # servicePort: 8443 + # name: https + # port: 8443 + # nodePort: 30443 + # protocol: TCP + # ``` + + resources: {} + # coordinator.resources -- It is recommended not to specify default resources + # and to leave this as a conscious choice for the user. This also increases + # chances charts run on environments with little resources, such as Minikube. + # If you do want to specify resources, use the following example, and adjust + # it as necessary. + # @raw + # Example: + # ```yaml + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + # ``` + + livenessProbe: {} + # coordinator.livenessProbe -- [Liveness + # probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes) + # options + # @raw + # Example: + # ```yaml + # initialDelaySeconds: 20 + # periodSeconds: 10 + # timeoutSeconds: 5 + # failureThreshold: 6 + # successThreshold: 1 + # ``` + readinessProbe: {} + # coordinator.readinessProbe -- [Readiness + # probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes) + # @raw + # Example: + # ```yaml + # initialDelaySeconds: 20 + # periodSeconds: 10 + # timeoutSeconds: 5 + # failureThreshold: 6 + # successThreshold: 1 + # ``` + + lifecycle: {} + # coordinator.lifecycle -- Coordinator container [lifecycle + # events](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/) + 
# @raw + # Example: + # ```yaml + # preStop: + # exec: + # command: ["/bin/sh", "-c", "sleep 120"] + # ``` + + terminationGracePeriodSeconds: 30 + + nodeSelector: {} + + tolerations: [] + + affinity: {} + + additionalConfigFiles: {} + # coordinator.additionalConfigFiles -- Additional config files placed in the default configuration directory. + # Supports templating the files' contents with `tpl`. + # @raw + # Example: + # ```yaml + # secret.txt: | + # secret-value={{- .Values.someValue }} + # ``` + + additionalVolumes: [] + # coordinator.additionalVolumes -- One or more additional volumes to add to the coordinator. + # @raw + # Example: + # ```yaml + # - name: extras + # emptyDir: {} + # ``` + + additionalVolumeMounts: [] + # coordinator.additionalVolumeMounts -- One or more additional volume mounts to add to the coordinator. + # @raw + # Example: + # - name: extras + # mountPath: /usr/share/extras + # readOnly: true + + annotations: {} + # coordinator.annotations -- Annotations to add to the coordinator pod. + # @raw + # By default, the following annotations are added to the coordinator pod: + # - `checksum/access-control-config` - checksum of the coordinator access control config file; + # - `checksum/catalog-config` - checksum of the catalog config file; + # - `checksum/coordinator-config` - checksum of the coordinator config file. + # This allows for automatic rolling updates on configuration changes. This behaviour can be disabled by manually + # setting these annotations to fixed constants in the `coordinator.annotations` section. + # Example: + # ```yaml + # annotations: + # checksum/access-control-config: "" + # checksum/catalog-config: "" + # checksum/coordinator-config: "" + # ``` + + labels: {} + + configMounts: [] + # coordinator.configMounts -- Allows mounting additional Trino configuration + # files from Kubernetes config maps on the coordinator node. 
+ # @raw + # Example: + # ```yaml + # - name: sample-config-mount + # configMap: sample-config-mount + # path: /config-mount/sample.json + # subPath: sample.json + # ``` + + secretMounts: [] + # coordinator.secretMounts -- Allows mounting additional Trino configuration + # files from Kubernetes secrets on the coordinator node. + # @raw + # Example: + # ```yaml + # - name: sample-secret + # secretName: sample-secret + # path: /secrets/sample.json + # subPath: sample.json + # ``` + +worker: + deployment: + annotations: {} + progressDeadlineSeconds: 600 + # worker.deployment.progressDeadlineSeconds -- The maximum time in seconds for a deployment to make progress before it + # is considered failed. The deployment controller continues to + # process failed deployments and a condition with a ProgressDeadlineExceeded + # reason is surfaced in the deployment status. + + revisionHistoryLimit: 10 + # worker.deployment.revisionHistoryLimit -- The number of old ReplicaSets to retain to allow rollback. + + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + # worker.deployment.strategy -- The deployment strategy to use to replace existing pods with new ones. + + jvm: + maxHeapSize: "8G" + gcMethod: + type: "UseG1GC" + g1: + heapRegionSize: "32M" + + config: + memory: + heapHeadroomPerNode: "" + query: + maxMemoryPerNode: "1GB" + + additionalJVMConfig: [] + + additionalExposedPorts: {} + # worker.additionalExposedPorts -- Additional container ports configured in all worker pods and the worker service. + # @raw + # Example: + # ```yaml + # https: + # servicePort: 8443 + # name: https + # port: 8443 + # protocol: TCP + # ``` + + resources: {} + # worker.resources -- It is recommended not to specify default resources and + # to leave this as a conscious choice for the user. This also increases + # chances charts run on environments with little resources, such as Minikube. 
+ # If you do want to specify resources, use the following example, and adjust + # it as necessary. + # @raw + # Example: + # ```yaml + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + # ``` + + livenessProbe: {} + # worker.livenessProbe -- [Liveness + # probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes) + # @raw + # Example: + # ```yaml + # initialDelaySeconds: 20 + # periodSeconds: 10 + # timeoutSeconds: 5 + # failureThreshold: 6 + # successThreshold: 1 + # ``` + readinessProbe: {} + # worker.readinessProbe -- [Readiness + # probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes) + # @raw + # Example: + # ```yaml + # initialDelaySeconds: 20 + # periodSeconds: 10 + # timeoutSeconds: 5 + # failureThreshold: 6 + # successThreshold: 1 + # ``` + + lifecycle: {} + # worker.lifecycle -- Worker container [lifecycle + # events](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/) + # + # Setting `worker.lifecycle` conflicts with `worker.gracefulShutdown`. + # + # @raw + # Example: + # ```yaml + # preStop: + # exec: + # command: ["/bin/sh", "-c", "sleep 120"] + # ``` + + gracefulShutdown: + enabled: false + gracePeriodSeconds: 120 + # worker.gracefulShutdown -- Configure [graceful + # shutdown](https://trino.io/docs/current/admin/graceful-shutdown.html) in order to ensure that workers terminate + # without affecting running queries, given a sufficient grace period. When enabled, the value of + # `worker.terminationGracePeriodSeconds` must be at least two times greater than the configured `gracePeriodSeconds`. + # Enabling `worker.gracefulShutdown` conflicts with `worker.lifecycle`. When a custom `worker.lifecycle` configuration + # needs to be used, graceful shutdown must be configured manually. 
+ # + # @raw + # Example: + # ```yaml + # gracefulShutdown: + # enabled: true + # gracePeriodSeconds: 120 + # ``` + + terminationGracePeriodSeconds: 30 + + nodeSelector: {} + + tolerations: [] + + affinity: {} + + additionalConfigFiles: {} + # worker.additionalConfigFiles -- Additional config files placed in the default configuration directory. + # Supports templating the files' contents with `tpl`. + # @raw + # Example: + # ```yaml + # secret.txt: | + # secret-value={{- .Values.someValue }} + # ``` + + additionalVolumes: [] + # worker.additionalVolumes -- One or more additional volume mounts to add to all workers. + # @raw + # Example: + # ```yaml + # - name: extras + # emptyDir: {} + # ``` + + additionalVolumeMounts: [] + # worker.additionalVolumeMounts -- One or more additional volume mounts to add to all workers. + # @raw + # Example: + # ```yaml + # - name: extras + # mountPath: /usr/share/extras + # readOnly: true + # ``` + + annotations: {} + # worker.annotations -- Annotations to add to the worker pods. + # @raw + # By default, the following annotations are added to the worker pods: + # - `checksum/access-control-config` - checksum of the worker access control config file; + # - `checksum/catalog-config` - checksum of the catalog config file; + # - `checksum/worker-config` - checksum of the worker config file. + # This allows for automatic rolling updates on configuration changes. This behaviour can be disabled by manually + # setting these annotations to fixed constants in the `worker.annotations` section. + # Example: + # ```yaml + # annotations: + # checksum/access-control-config: "" + # checksum/catalog-config: "" + # checksum/worker-config: "" + # ``` + + labels: {} + + configMounts: [] + # worker.configMounts -- Allows mounting additional Trino configuration + # files from Kubernetes config maps on all worker nodes. 
+ # @raw + # Example: + # ```yaml + # - name: sample-config-mount + # configMap: sample-config-mount + # path: /config-mount/sample.json + # subPath: sample.json + # ``` + + secretMounts: [] + # worker.secretMounts -- Allows mounting additional Trino configuration + # files from Kubernetes secrets on all worker nodes. + # @raw + # Example: + # ```yaml + # - name: sample-secret + # secretName: sample-secret + # path: /secrets/sample.json + # subPath: sample.json + # ``` + +kafka: + mountPath: "/etc/trino/schemas" + tableDescriptions: {} + # kafka.tableDescriptions -- Custom kafka table descriptions that will be mounted in mountPath. + # @raw + # Example: + # ```yaml + # testschema.json: |- + # { + # "tableName": "testtable", + # "schemaName": "testschema", + # "topicName": "testtopic", + # "key": { + # "dataFormat": "json", + # "fields": [ + # { + # "name": "_key", + # "dataFormat": "VARCHAR", + # "type": "VARCHAR", + # "hidden": "false" + # } + # ] + # }, + # "message": { + # "dataFormat": "json", + # "fields": [ + # { + # "name": "id", + # "mapping": "id", + # "type": "BIGINT" + # }, + # { + # "name": "test_field", + # "mapping": "test_field", + # "type": "VARCHAR" + # } + # ] + # } + # } + # ``` + +jmx: + # -- Set to true to enable the RMI server to expose Trino's [JMX metrics](https://trino.io/docs/current/admin/jmx.html). + enabled: false + registryPort: 9080 + serverPort: 9081 + exporter: + # jmx.exporter.enabled -- Set to true to export JMX Metrics via HTTP for [Prometheus](https://github.com/prometheus/jmx_exporter) consumption + enabled: false + image: bitnami/jmx-exporter:1.0.1 + pullPolicy: Always + port: 5556 + configProperties: "" + # jmx.exporter.configProperties -- The string value is templated using `tpl`. The JMX config properties file + # is mounted to `/etc/jmx-exporter/jmx-exporter-config.yaml`. 
+ # @raw + # Example: + # ```yaml + # configProperties: |- + # hostPort: localhost:{{- .Values.jmx.registryPort }} + # startDelaySeconds: 0 + # ssl: false + # lowercaseOutputName: false + # lowercaseOutputLabelNames: false + # includeObjectNames: ["java.lang:type=Threading"] + # autoExcludeObjectNameAttributes: true + # excludeObjectNameAttributes: + # "java.lang:type=OperatingSystem": + # - "ObjectName" + # "java.lang:type=Runtime": + # - "ClassPath" + # - "SystemProperties" + # rules: + # - pattern: 'java\.lang<(.*)>ThreadCount: (.*)' + # name: java_lang_Threading_ThreadCount + # value: '$2' + # help: 'ThreadCount (java.lang<>ThreadCount)' + # type: UNTYPED + # ``` + securityContext: {} + resources: {} + # jmx.exporter.resources -- It is recommended not to specify default resources + # and to leave this as a conscious choice for the user. This also increases + # chances charts run on environments with little resources, such as Minikube. + # If you do want to specify resources, use the following example, and adjust + # it as necessary. + # @raw + # Example: + # ```yaml + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + # ``` + coordinator: {} + # jmx.coordinator -- Override JMX configurations for the Trino coordinator. + # @raw + # Example + # ```yaml + # coordinator: + # enabled: true + # exporter: + # enabled: true + # configProperties: |- + # hostPort: localhost:{{- .Values.jmx.registryPort }} + # startDelaySeconds: 0 + # ssl: false + # ``` + worker: {} + # jmx.worker -- Override JMX configurations for the Trino workers. + # @raw + # Example + # ```yaml + # worker: + # enabled: true + # exporter: + # enabled: true + # ``` + +serviceMonitor: + # serviceMonitor.enabled -- Set to true to create resources for the + # [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator). 
+ enabled: false + + apiVersion: monitoring.coreos.com/v1 + # serviceMonitor.labels -- Labels for serviceMonitor, so that Prometheus can select it + labels: + prometheus: kube-prometheus + # serviceMonitor.interval -- The serviceMonitor web endpoint interval + interval: "30s" + coordinator: {} + # serviceMonitor.coordinator -- Override ServiceMonitor configurations for the Trino coordinator. + # @raw + # Example + # ```yaml + # coordinator: + # enabled: true + # labels: + # prometheus: my-prometheus + # ``` + worker: {} + # serviceMonitor.worker -- Override ServiceMonitor configurations for the Trino workers. + # @raw + # Example + # ```yaml + # worker: + # enabled: true + # labels: + # prometheus: my-prometheus + # ``` + + # -- Labels that get applied to every resource's metadata +commonLabels: {} + +ingress: + enabled: false + className: "" + annotations: {} + hosts: [] + # ingress.hosts -- [Ingress + # rules](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules). + # @raw + # Example: + # ```yaml + # - host: trino.example.com + # paths: + # - path: / + # pathType: ImplementationSpecific + # ``` + tls: [] + # ingress.tls -- Ingress + # [TLS](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) + # configuration. + # @raw + # Example: + # ```yaml + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + # ``` + +networkPolicy: + # networkPolicy.enabled -- Set to true to enable Trino pod protection with a + # [NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/). + # By default, the NetworkPolicy will only allow Trino pods to communicate with each other. + # @raw + # > [!NOTE] + # > - NetworkPolicies cannot block the ingress traffic coming directly + # > from the Kubernetes node on which the Pod is running, + # > and are thus incompatible with services of type `NodePort`. 
+ # > - When using NetworkPolicies together with JMX metrics export, + # > additional ingress rules might be required to allow metric scraping. + enabled: false + # networkPolicy.ingress -- Additional ingress rules to apply to the Trino pods. + # @raw + # Example: + # ```yaml + # - from: + # - ipBlock: + # cidr: 172.17.0.0/16 + # except: + # - 172.17.1.0/24 + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: prometheus + # - podSelector: + # matchLabels: + # role: backend-app + # ports: + # - protocol: TCP + # port: 8080 + # - protocol: TCP + # port: 5556 + # ``` + ingress: [] + # networkPolicy.egress -- Egress rules to apply to the Trino pods. + # @raw + # Example: + # ```yaml + # - to: + # - podSelector: + # matchLabels: + # role: log-ingestor + # ports: + # - protocol: TCP + # port: 9999 + # ``` + egress: [] diff --git a/argo/apps/500-trino/templates/certificate.yaml b/argo/apps/500-trino/templates/certificate.yaml new file mode 100644 index 0000000..770b62f --- /dev/null +++ b/argo/apps/500-trino/templates/certificate.yaml @@ -0,0 +1,11 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: trino-cert +spec: + secretName: trino-tls + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - trino.cloudfleet.platform.5ha.re diff --git a/argo/apps/500-trino/templates/ingress.yaml b/argo/apps/500-trino/templates/ingress.yaml new file mode 100644 index 0000000..fb5bb0a --- /dev/null +++ b/argo/apps/500-trino/templates/ingress.yaml @@ -0,0 +1,20 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: trino-ingress + annotations: + ingress.kubernetes.io/ssl-redirect: "true" +spec: + rules: + - host: trino.cloudfleet.platform.5ha.re + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: trino + port: + number: 8080 + tls: + - secretName: trino-tls diff --git a/argo/apps/500-trino/templates/pg_credentials.yaml b/argo/apps/500-trino/templates/pg_credentials.yaml new file mode 
100644 index 0000000..a9f3c97 --- /dev/null +++ b/argo/apps/500-trino/templates/pg_credentials.yaml @@ -0,0 +1,26 @@ +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: pg-credentials +spec: + refreshInterval: 300s # Refresh every 5 minutes + secretStoreRef: + name: k8s-services + kind: ClusterSecretStore + data: + - secretKey: HOST # Key in the Kubernetes Secret + remoteRef: + key: my-cute-postgres-cluster-pguser-trino + property: host # Property to extract from the Vault secret + - secretKey: PORT # Key in the Kubernetes Secret + remoteRef: + key: my-cute-postgres-cluster-pguser-trino + property: port # Property to extract from the Vault secret + - secretKey: USERNAME # Key in the Kubernetes Secret + remoteRef: + key: my-cute-postgres-cluster-pguser-trino + property: user # Property to extract from the Vault secret + - secretKey: PASSWORD # Key in the Kubernetes Secret + remoteRef: + key: my-cute-postgres-cluster-pguser-trino + property: password # Property to extract from the Vault secret diff --git a/argo/apps/500-trino/templates/s3.yaml b/argo/apps/500-trino/templates/s3.yaml new file mode 100644 index 0000000..0537e6a --- /dev/null +++ b/argo/apps/500-trino/templates/s3.yaml @@ -0,0 +1,26 @@ +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: s3-credentials +spec: + refreshInterval: 300s # Refresh every 5 minutes + secretStoreRef: + name: vault-backend + kind: ClusterSecretStore + data: + - secretKey: ENDPOINT # Key in the Kubernetes Secret + remoteRef: + key: platform/s3-credentials # Path in Vault + property: ENDPOINT # Property to extract from the Vault secret + - secretKey: ACCESS_KEY_ID # Key in the Kubernetes Secret + remoteRef: + key: platform/s3-credentials # Path in Vault + property: ACCESS_KEY_ID # Property to extract from the Vault secret + - secretKey: REGION # Key in the Kubernetes Secret + remoteRef: + key: platform/s3-credentials # Path in Vault + property: REGION # Property to extract from the 
Vault secret + - secretKey: SECRET_ACCESS_KEY # Key in the Kubernetes Secret + remoteRef: + key: platform/s3-credentials # Path in Vault + property: SECRET_ACCESS_KEY # Property to extract from the Vault secret diff --git a/argo/apps/500-trino/values.yaml b/argo/apps/500-trino/values.yaml index 343604d..623a767 100644 --- a/argo/apps/500-trino/values.yaml +++ b/argo/apps/500-trino/values.yaml @@ -1,13 +1,41 @@ # Custom values for the Trino Helm chart # Values placed under the 'trino:' key will be passed to the subchart. trino: + image: + repository: zot.cloudfleet.platform.5ha.re/trinodb/trino + tag: "443" + additionalConfigProperties: + - http-server.process-forwarded=true + - internal-communication.shared-secret=dug5cTAEayZB6S6R+OghCfQBUXcE4pDrNe0Z6I4cqMEWDjn7xQFJrLq2xB8GBlvMp9FAL7E894wGCymJreMBaCUbH1VuWUf+HXTFG/gtgqhWRISR4d6ZK9mRoOlAjVQYkpg0P94phqfkRyCSAcx6Zvz+k3eRdTnh2059IRdTrOyT9VFuiU7WOWt21/dgnKgjmexKFPzxcY5Pxg6/e2J7k0Pk6R+gNALaCeRYNt/eMG7w/+EYnwtCTdiAJuvZcCQrk7qZzAj0NGLPr4VSEy+SI/vHQct5sYHTwLFeupe3kkWNPwKmDKxVIssJnoOznNCyFEMAGAwHdl4AuHR4ZosxckE4Pgn/PktCPOAi4fJVAzhsnoZcftDDR4OXG1LHIUZ50jZtpiDBSWY/pzDhgPUgDC5n6a4LWi4Z76qYVfxDg+irc1DSY9YphXCHOb0telJUvzrjp9+TMCq9DZsL+IT3EZCIBn8tClVtf7lfN4VEFgVYjQTV7VwSohY3aY3SyJr1k/PQkhSgUQ1kW+YfI/vsrWqkuchNCmqjt1N3ewdyNsRgeGoOT7nFUgDoNv5umlvM3bwdHKT/4RQpe7z+6N7xyaojMzq4PK0sCVj1wpkKuiRy35LxU5O6ulfWUI5REdzJE2ODn/qOmYRnnyM55m8vOpYgb9vg1ZlFNUymP/bojqY= + server: + workers: 1 + config: + authenticationType: "PASSWORD" + coordinator: + resources: + requests: + cpu: 250m + memory: 1Gi + limits: + cpu: 1000m + memory: 1Gi + worker: + # nodeSelector: + # node.kubernetes.io/instance-type: "ccx33" + resources: + requests: + cpu: 250m + memory: 1Gi + limits: + cpu: 1000m + memory: 1Gi envFrom: - - secretRef: - name: s3-credentials - prefix: S3_ - - secretRef: - name: pg-credentials - prefix: PG_ + - secretRef: + name: s3-credentials + prefix: S3_ + - secretRef: + name: pg-credentials + prefix: PG_ catalogs: iceberg: | 
connector.name=iceberg @@ -17,11 +45,25 @@ trino: iceberg.jdbc-catalog.connection-url=jdbc:postgresql://${ENV:PG_HOST}:${ENV:PG_PORT}/catalog iceberg.jdbc-catalog.connection-user=${ENV:PG_USERNAME} iceberg.jdbc-catalog.connection-password=${ENV:PG_PASSWORD} - iceberg.jdbc-catalog.default-warehouse-dir=s3://dp-stack-tf-os + iceberg.jdbc-catalog.default-warehouse-dir=s3://inno-days-bucket/platform/iceberg/ fs.native-s3.enabled=true s3.endpoint=${ENV:S3_ENDPOINT} s3.region=${ENV:S3_REGION} s3.aws-access-key=${ENV:S3_ACCESS_KEY_ID} s3.aws-secret-key=${ENV:S3_SECRET_ACCESS_KEY} + auth: + passwordAuth: "admin:$2b$08$mJ4nVo8R63bcmJlBNxOsreqLaQF3ajVGAgvHZ28QAOOJ23VFgYTxG" - + # Configure ingress for Trino + ingress: + enabled: true + annotations: + # Use Traefik's websecure entrypoint for HTTPS + traefik.ingress.kubernetes.io/router.entrypoints: websecure + # Enable TLS termination at the router (Traefik handles certs) + traefik.ingress.kubernetes.io/router.tls: "true" + hosts: + - host: trino.cloudfleet.platform.5ha.re + paths: + - path: / + pathType: Prefix diff --git a/argo/apps/600-zitadel/zitadel.yaml b/argo/apps/600-zitadel/application.yaml similarity index 81% rename from argo/apps/600-zitadel/zitadel.yaml rename to argo/apps/600-zitadel/application.yaml index d0d5594..7997337 100644 --- a/argo/apps/600-zitadel/zitadel.yaml +++ b/argo/apps/600-zitadel/application.yaml @@ -5,14 +5,14 @@ metadata: namespace: argocd # Add finalizer to ensure that Helm release is deleted before the app finalizers: - - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer + - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer spec: project: default source: # Source is the Git repository containing this Application manifest and the wrapper chart repoURL: https://github.com/datamindedbe/playground-data-platform-stack.git # Assuming the same repo path: argo/apps/600-zitadel # Path to the zitadel wrapper chart directory - targetRevision: HEAD # Or your 
specific branch/tag + targetRevision: cloudfleet # Or your specific branch/tag # Helm configuration for the wrapper chart helm: @@ -28,4 +28,4 @@ spec: prune: true selfHeal: true syncOptions: - - CreateNamespace=true # Ensure the zitadel namespace is created + - CreateNamespace=true # Ensure the zitadel namespace is created diff --git a/argo/apps/600-zitadel/templates/certificate.yaml b/argo/apps/600-zitadel/templates/certificate.yaml new file mode 100644 index 0000000..1f9971d --- /dev/null +++ b/argo/apps/600-zitadel/templates/certificate.yaml @@ -0,0 +1,11 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: zitadel-cert +spec: + secretName: zitadel-tls + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - zitadel.cloudfleet.platform.5ha.re diff --git a/argo/apps/600-zitadel/templates/zitadel-config.yaml b/argo/apps/600-zitadel/templates/zitadel-config.yaml new file mode 100644 index 0000000..d2a24a3 --- /dev/null +++ b/argo/apps/600-zitadel/templates/zitadel-config.yaml @@ -0,0 +1,117 @@ +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: zitadel-config +spec: + refreshInterval: 300s # Refresh every 5 minutes + secretStoreRef: + name: vault-backend + kind: ClusterSecretStore + target: + template: + engineVersion: v2 + data: + config.yaml: | + FirstInstance: + Org: + Human: + Username: "admin" + Password: "SecureP@ssw0rd!" 
+ FirstName: "Default" + LastName: "Admin" + Email: + Address: "cloudfleet@5ha.re" + Verified: true + Log: + Level: debug + Formatter: + Format: text + + Database: + postgres: + Host: "{{`{{ .postgres_host }}`}}" + Port: "{{`{{ .postgres_port }}`}}" + Database: "{{`{{ .postgres_dbname }}`}}" + User: + Username: "{{`{{ .postgres_user }}`}}" + Password: "{{`{{ .postgres_password }}`}}" + SSL: + Mode: disable + Admin: + ExistingDatabase: zitadel + Username: "{{`{{ .postgres_user }}`}}" + Password: "{{`{{ .postgres_password }}`}}" + SSL: + Mode: disable + + Machine: + Identification: + Hostname: "zitadel.cloudfleet.platform.5ha.re" + + + DefaultInstance: + DomainPolicy: + LoginPolicy: + AllowUsernamePassword: true + AllowRegister: true + AllowExternalIDP: false + SMTPConfiguration: + SMTP: + Host: "{{`{{ .smtp_host }}`}}" + User: "{{`{{ .smtp_username }}`}}" + Password: "{{`{{ .smtp_password }}`}}" + From: "cloudfleet@5ha.re" + FromName: "Cloudfleet Platform" + data: + - secretKey: postgres_host + remoteRef: + key: my-cute-postgres-cluster-pguser-zitadel + property: host + sourceRef: + storeRef: + name: k8s-services + kind: ClusterSecretStore + - secretKey: postgres_port + remoteRef: + key: my-cute-postgres-cluster-pguser-zitadel + property: port + sourceRef: + storeRef: + name: k8s-services + kind: ClusterSecretStore + - secretKey: postgres_dbname + remoteRef: + key: my-cute-postgres-cluster-pguser-zitadel + property: dbname + sourceRef: + storeRef: + name: k8s-services + kind: ClusterSecretStore + - secretKey: postgres_user + remoteRef: + key: my-cute-postgres-cluster-pguser-zitadel + property: user + sourceRef: + storeRef: + name: k8s-services + kind: ClusterSecretStore + - secretKey: postgres_password + remoteRef: + key: my-cute-postgres-cluster-pguser-zitadel + property: password + sourceRef: + storeRef: + name: k8s-services + kind: ClusterSecretStore + - secretKey: smtp_host + remoteRef: + key: platform/smtp-credentials + property: host + - secretKey: 
smtp_username + remoteRef: + key: platform/smtp-credentials + property: username + - secretKey: smtp_password + remoteRef: + key: platform/smtp-credentials + property: password diff --git a/argo/apps/600-zitadel/templates/zitadel-envs.yaml b/argo/apps/600-zitadel/templates/zitadel-envs.yaml new file mode 100644 index 0000000..b0b3a80 --- /dev/null +++ b/argo/apps/600-zitadel/templates/zitadel-envs.yaml @@ -0,0 +1,97 @@ +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: zitadel-envs +spec: + refreshInterval: 300s # Refresh every 5 minutes + secretStoreRef: + name: vault-backend + kind: ClusterSecretStore + target: + template: + engineVersion: v2 + data: + ZITADEL_LOG_LEVEL: "info" + ZITADEL_EXTERNALPORT: "443" + ZITADEL_EXTERNALSECURE: "true" + ZITADEL_EXTERNALDOMAIN: "zitadel.cloudfleet.platform.5ha.re" + ZITADEL_TLS_ENABLED: "false" + ZITADEL_DATABASE_POSTGRES_HOST: "{{`{{ .postgres_host }}`}}" + ZITADEL_DATABASE_POSTGRES_PORT: "{{`{{ .postgres_port }}`}}" + ZITADEL_DATABASE_POSTGRES_DATABASE: "{{`{{ .postgres_dbname }}`}}" + ZITADEL_DATABASE_POSTGRES_MAXOPENCONNS: "10" + ZITADEL_DATABASE_POSTGRES_MAXIDLECONNS: "5" + ZITADEL_DATABASE_POSTGRES_MAXCONNLIFETIME: "30m" + ZITADEL_DATABASE_POSTGRES_MAXCONNIDLETIME: "5m" + ZITADEL_DATABASE_POSTGRES_USER_USERNAME: "{{`{{ .postgres_user }}`}}" + ZITADEL_DATABASE_POSTGRES_USER_PASSWORD: "{{`{{ .postgres_password }}`}}" + ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE: "disable" + ZITADEL_DATABASE_POSTGRES_ADMIN_EXISTINGDATABASE: "zitadel" + ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME: "{{`{{ .postgres_user }}`}}" + ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD: "{{`{{ .postgres_password }}`}}" + ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE: "disable" + ZITADEL_FIRSTINSTANCE_ORG_HUMAN_USERNAME: "admin" + ZITADEL_FIRSTINSTANCE_ORG_HUMAN_PASSWORD: "SecureP@ssw0rd!" 
+ ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_HOST: "{{`{{ .smtp_host }}:{{ .smtp_port }}`}}" + ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_USERNAME: "{{`{{ .smtp_username }}`}}" + ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_PASSWORD: "{{`{{ .smtp_password }}`}}" + ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_FROM: "cloudfleet@5ha.re" + ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_FROM_NAME: "Gergely and Jonathan" + ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_REPLYTOADDRESS: "jan.vanbuel@dataminded.com" + data: + - secretKey: postgres_host + remoteRef: + key: my-cute-postgres-cluster-pguser-zitadel + property: host + sourceRef: + storeRef: + name: k8s-services + kind: ClusterSecretStore + - secretKey: postgres_port + remoteRef: + key: my-cute-postgres-cluster-pguser-zitadel + property: port + sourceRef: + storeRef: + name: k8s-services + kind: ClusterSecretStore + - secretKey: postgres_dbname + remoteRef: + key: my-cute-postgres-cluster-pguser-zitadel + property: dbname + sourceRef: + storeRef: + name: k8s-services + kind: ClusterSecretStore + - secretKey: postgres_user + remoteRef: + key: my-cute-postgres-cluster-pguser-zitadel + property: user + sourceRef: + storeRef: + name: k8s-services + kind: ClusterSecretStore + - secretKey: postgres_password + remoteRef: + key: my-cute-postgres-cluster-pguser-zitadel + property: password + sourceRef: + storeRef: + name: k8s-services + kind: ClusterSecretStore + - secretKey: smtp_host + remoteRef: + key: platform/smtp-credentials + property: host + - secretKey: smtp_port + remoteRef: + key: platform/smtp-credentials + property: port + - secretKey: smtp_username + remoteRef: + key: platform/smtp-credentials + property: username + - secretKey: smtp_password + remoteRef: + key: platform/smtp-credentials + property: password diff --git a/argo/apps/600-zitadel/values.yaml b/argo/apps/600-zitadel/values.yaml index 3632125..be58cb7 100644 --- a/argo/apps/600-zitadel/values.yaml +++ b/argo/apps/600-zitadel/values.yaml @@ -3,20 
+3,34 @@ zitadel: zitadel: masterkey: x123456789012345678901234567891y configmapConfig: - ExternalSecure: false - ExternalDomain: 127.0.0.1.sslip.io - TLS: - Enabled: false - Database: - Postgres: - MaxOpenConns: 20 - MaxIdleConns: 10 - MaxConnLifetime: 30m - MaxConnIdleTime: 5m - User: - Username: postgres - SSL: - Mode: disable - configSecretName: zitadel-credentials - configSecretKey: config.yaml - \ No newline at end of file + ExternalSecure: true + ExternalDomain: "zitadel.cloudfleet.platform.5ha.re" + # TLS: + # Enabled: false # TLS termination at ingress level + # ExternalPort: 443 + # Use External Secret with template that combines: + # - PostgreSQL credentials from k8s-services ClusterSecretStore (Crunchy-generated secret) + + # - SMTP credentials from vault-backend ClusterSecretStore + # configSecretName: zitadel-config + + # configSecretKey: config.yaml + # dbSslCaCrtSecret: pg-certificate + envVarsSecret: zitadel-envs + ingress: + enabled: true + className: traefik + annotations: + # Use Traefik's websecure entrypoint for HTTPS + traefik.ingress.kubernetes.io/router.entrypoints: websecure + # Enable TLS termination at the router (Traefik handles certs) + traefik.ingress.kubernetes.io/router.tls: "true" + hosts: + - host: zitadel.cloudfleet.platform.5ha.re + paths: + - path: / + pathType: Prefix + tls: + - hosts: + - zitadel.cloudfleet.platform.5ha.re + secretName: zitadel-tls diff --git a/bootstrap/.gitignore b/bootstrap/.gitignore new file mode 100644 index 0000000..4a424df --- /dev/null +++ b/bootstrap/.gitignore @@ -0,0 +1 @@ +secret.yaml diff --git a/bootstrap/argocd-ingress.yaml b/bootstrap/argocd-ingress.yaml deleted file mode 100644 index 8cc42d9..0000000 --- a/bootstrap/argocd-ingress.yaml +++ /dev/null @@ -1,30 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: argocd-server-ingress - namespace: argocd - annotations: - # Specify Traefik as the ingress controller (if using legacy annotation) - # 
kubernetes.io/ingress.class: traefik - # Use Traefik's websecure entrypoint for HTTPS - traefik.ingress.kubernetes.io/router.entrypoints: websecure - # Enable TLS termination at the router (Traefik will handle certs, often self-signed by default for testing) - traefik.ingress.kubernetes.io/router.tls: "true" - # Traefik will connect to the backend using HTTP (default scheme) - # You might need a cert-manager annotation here too if using it for proper certs - ingress.kubernetes.io/ssl-redirect: "false" -spec: - # Define TLS section for the host (required when router.tls is true) - # Traefik might generate a default cert if none is specified - rules: - - host: argocd.localhost - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: argocd-server # The ArgoCD server service - port: - # Use the HTTP port for the ArgoCD server service - number: 80 diff --git a/bootstrap/kustomization.yaml b/bootstrap/kustomization.yaml index 3c23fe0..01d496a 100644 --- a/bootstrap/kustomization.yaml +++ b/bootstrap/kustomization.yaml @@ -1,11 +1,10 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: argocd - # ServersTransport removed as backend connection is now HTTP +namespace: argocd # ServersTransport removed as backend connection is now HTTP resources: - namespace.yaml - https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml -- argocd-ingress.yaml +- preemptible-pod.yaml # Patch the argocd-cm ConfigMap (for URL) using strategic merge patches: diff --git a/bootstrap/preemptible-pod.yaml b/bootstrap/preemptible-pod.yaml new file mode 100644 index 0000000..3d4b8b2 --- /dev/null +++ b/bootstrap/preemptible-pod.yaml @@ -0,0 +1,47 @@ +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: low-priority +value: -1000 +globalDefault: false +description: "Priority class for pods that can be easily evicted" +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: preemptible-pods +spec: + 
serviceName: preemptible-pods + replicas: 4 + selector: + matchLabels: + app: preemptible + template: + metadata: + labels: + app: preemptible + spec: + priorityClassName: low-priority + containers: + - name: preemptible-container + image: busybox + command: [ "sleep", "infinity" ] + resources: + requests: + cpu: "1" + memory: "2Gi" + limits: + cpu: "1" + memory: "2Gi" +--- +apiVersion: v1 +kind: Service +metadata: + name: preemptible-pods +spec: + ports: + - port: 80 + name: http + clusterIP: None + selector: + app: preemptible diff --git a/bootstrap/root-app.yaml b/bootstrap/root-app.yaml index 59cb004..1653aa9 100644 --- a/bootstrap/root-app.yaml +++ b/bootstrap/root-app.yaml @@ -5,21 +5,22 @@ metadata: namespace: argocd # Add finalizer to ensure that apps are deleted before the root app finalizers: - - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer + - argocd.argoproj.io/resources-finalizer # Use domain-qualified finalizer spec: project: default source: repoURL: https://github.com/datamindedbe/playground-data-platform-stack.git # Replace if your repo URL is different - targetRevision: HEAD # Or specify a branch/tag + targetRevision: cloudfleet # Or specify a branch/tag path: argo/apps # Directory containing the application manifests directory: recurse: true # Automatically discover apps in subdirectories + include: "**/application.yaml" # Only include files named application.yaml destination: server: https://kubernetes.default.svc # Target the local cluster # Namespace is not needed here as child apps define their own destinations syncPolicy: automated: - prune: true # Delete resources not defined in Git + prune: true # Delete resources not defined in Git selfHeal: true # Automatically sync if state deviates syncOptions: - - CreateNamespace=true # Automatically create namespaces for apps if they don't exist + - CreateNamespace=true # Automatically create namespaces for apps if they don't exist diff --git a/cloudfleet/2048.yaml 
b/cloudfleet/2048.yaml new file mode 100644 index 0000000..fb5330a --- /dev/null +++ b/cloudfleet/2048.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: game-2048 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: game-2048 + name: deployment-2048 +spec: + selector: + matchLabels: + app.kubernetes.io/name: app-2048 + replicas: 2 + template: + metadata: + labels: + app.kubernetes.io/name: app-2048 + spec: + containers: + - image: public.ecr.aws/l6m2t8p7/docker-2048:latest + imagePullPolicy: Always + name: app-2048 + ports: + - containerPort: 80 + resources: + requests: + cpu: "0.5" +--- +apiVersion: v1 +kind: Service +metadata: + namespace: game-2048 + name: service-2048 +spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: game + type: LoadBalancer + selector: + app.kubernetes.io/name: app-2048 diff --git a/debug-zot.sh b/debug-zot.sh new file mode 100644 index 0000000..7fdfd2a --- /dev/null +++ b/debug-zot.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +echo "=== Debugging applications ===" +echo "" + +echo "1. Checking PostgreSQL cluster status:" +kubectl get postgrescluster -n services + +echo "" +echo "2. Checking PostgreSQL secrets (Crunchy-generated):" +kubectl get secrets -n services | grep pguser + +echo "" +echo "3. Checking zot pod status:" +kubectl get pods -n services -l app.kubernetes.io/name=zot + +echo "" +echo "4. Checking zitadel pod status:" +kubectl get pods -n services -l app.kubernetes.io/name=zitadel + +echo "" +echo "5. Checking services:" +kubectl get svc -n services + +echo "" +echo "6. Checking ingress:" +kubectl get ingress -n services + +echo "" +echo "7. Checking ExternalSecret status:" +kubectl get externalsecrets -n services -o wide + +echo "" +echo "8. Latest zot logs (last 10 lines):" +kubectl logs -n services -l app.kubernetes.io/name=zot --tail=10 + +echo "" +echo "9. 
Latest zitadel logs (last 10 lines):" +kubectl logs -n services -l app.kubernetes.io/name=zitadel --tail=10 + +echo "" +echo "10. Traefik logs (looking for 504 errors):" +kubectl logs -n traefik-system -l app.kubernetes.io/name=traefik --tail=20 | grep -E "(504|timeout|error)" || echo "No recent 504/timeout errors in Traefik logs" + +echo "" +echo "=== To monitor logs in real-time ===" +echo "Zot logs: kubectl logs -n services -l app.kubernetes.io/name=zot -f" +echo "Zitadel logs: kubectl logs -n services -l app.kubernetes.io/name=zitadel -f" +echo "PostgreSQL logs: kubectl logs -n services -l postgres-operator.crunchydata.com/cluster=my-cute-postgres-cluster -f" diff --git a/manual/.gitignore b/manual/.gitignore new file mode 100644 index 0000000..cae93a1 --- /dev/null +++ b/manual/.gitignore @@ -0,0 +1 @@ +vault-secret.yaml diff --git a/manual/test-secret.yaml b/manual/test-secret.yaml new file mode 100644 index 0000000..06dc744 --- /dev/null +++ b/manual/test-secret.yaml @@ -0,0 +1,22 @@ +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: gergely-little-secret + namespace: default # Change this to your target namespace +spec: + refreshInterval: 300s # Refresh every 5 minutes + secretStoreRef: + name: vault-backend + kind: ClusterSecretStore + # target: + # name: gergely-little-secret # Name of the Kubernetes Secret to create + # creationPolicy: Owner + # data: + # - secretKey: secret-data # Key in the Kubernetes Secret + # remoteRef: + # key: gergely/little/secret # Path in Vault + # property: data # Property to extract from the Vault secret + dataFrom: + - extract: + key: gergely/little/secret # Path in Vault + # property: data # Property to extract from the Vault secret diff --git a/manual/vault-secret-example.yaml b/manual/vault-secret-example.yaml new file mode 100644 index 0000000..047191e --- /dev/null +++ b/manual/vault-secret-example.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: eso-vault-secret + 
namespace: services +stringData: + token: mysecrettoken