diff --git a/k8s/instance/list b/k8s/instance/list index a11a8d07..4d327699 100644 --- a/k8s/instance/list +++ b/k8s/instance/list @@ -36,4 +36,4 @@ echo "$PODS" | jq --argjson limit ${LIMIT:-10} '{ launch_time: .metadata.creationTimestamp, spot: (.spec.nodeName // "" | test("spot"; "i")) }) -}' \ No newline at end of file +}' diff --git a/k8s/instance/tests/build_context.bats b/k8s/instance/tests/build_context.bats new file mode 100644 index 00000000..3148f3f8 --- /dev/null +++ b/k8s/instance/tests/build_context.bats @@ -0,0 +1,107 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for instance/build_context - instance parameter extraction +# ============================================================================= + +setup() { + export PROJECT_ROOT="$(cd "$BATS_TEST_DIRNAME/../../.." && pwd)" + source "$PROJECT_ROOT/testing/assertions.sh" + + export SCRIPT="$BATS_TEST_DIRNAME/../build_context" + + export CONTEXT='{ + "arguments": { + "application_id": "app-123", + "scope_id": "scope-456", + "deployment_id": "deploy-789" + } + }' + + export LIMIT=10 +} + +teardown() { + unset CONTEXT LIMIT APPLICATION_ID SCOPE_ID DEPLOYMENT_ID +} + +# ============================================================================= +# Success flow +# ============================================================================= +@test "instance/build_context: exports all parameters correctly" { + source "$SCRIPT" + + assert_equal "$APPLICATION_ID" "app-123" + assert_equal "$SCOPE_ID" "scope-456" + assert_equal "$DEPLOYMENT_ID" "deploy-789" + assert_equal "$LIMIT" "10" +} + +@test "instance/build_context: produces no stdout output" { + run bash -c 'source "$SCRIPT"' + + assert_equal "$status" "0" + assert_equal "$output" "" +} + +# ============================================================================= +# Array argument handling +# ============================================================================= 
+@test "instance/build_context: handles array arguments (takes first element)" { + export CONTEXT='{ + "arguments": { + "application_id": ["app-first", "app-second"], + "scope_id": ["scope-first", "scope-second"], + "deployment_id": ["deploy-first", "deploy-second"] + } + }' + + source "$SCRIPT" + + assert_equal "$APPLICATION_ID" "app-first" + assert_equal "$SCOPE_ID" "scope-first" + assert_equal "$DEPLOYMENT_ID" "deploy-first" +} + +# ============================================================================= +# Missing / null arguments +# ============================================================================= +@test "instance/build_context: handles missing arguments" { + export CONTEXT='{ + "arguments": {} + }' + + source "$SCRIPT" + + assert_equal "$APPLICATION_ID" "null" + assert_equal "$SCOPE_ID" "null" + assert_equal "$DEPLOYMENT_ID" "null" +} + +@test "instance/build_context: handles null arguments object" { + export CONTEXT='{}' + + source "$SCRIPT" + + assert_empty "$APPLICATION_ID" + assert_empty "$SCOPE_ID" + assert_empty "$DEPLOYMENT_ID" +} + +# ============================================================================= +# LIMIT handling +# ============================================================================= +@test "instance/build_context: uses default LIMIT of 10 when not set" { + unset LIMIT + + source "$SCRIPT" + + assert_equal "$LIMIT" "10" +} + +@test "instance/build_context: preserves custom LIMIT" { + export LIMIT=50 + + source "$SCRIPT" + + assert_equal "$LIMIT" "50" +} diff --git a/k8s/instance/tests/list.bats b/k8s/instance/tests/list.bats new file mode 100644 index 00000000..7c7831e4 --- /dev/null +++ b/k8s/instance/tests/list.bats @@ -0,0 +1,377 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for instance/list - list instances/pods with details +# ============================================================================= + +setup() { + export 
PROJECT_ROOT="$(cd "$BATS_TEST_DIRNAME/../../.." && pwd)" + source "$PROJECT_ROOT/testing/assertions.sh" + + export SCRIPT="$BATS_TEST_DIRNAME/../list" + + export NAMESPACE_OVERRIDE="" + export APPLICATION_ID="app-123" + export SCOPE_ID="scope-456" + export DEPLOYMENT_ID="deploy-789" + export LIMIT=10 + + # Default kubectl mock — two pods with different characteristics + kubectl() { + echo '{ + "items": [ + { + "metadata": { + "name": "app-pod-1", + "namespace": "nullplatform", + "labels": { + "nullplatform": "true", + "application_id": "app-123", + "scope_id": "scope-456" + }, + "creationTimestamp": "2024-01-01T10:00:00Z" + }, + "spec": { + "nodeName": "node-1", + "containers": [{ + "name": "main", + "resources": { + "requests": {"cpu": "100m", "memory": "128Mi"}, + "limits": {"cpu": "500m", "memory": "512Mi"} + } + }] + }, + "status": { + "phase": "Running", + "podIP": "10.0.0.5", + "containerStatuses": [{ + "name": "main", + "ready": true, + "image": "myapp:latest" + }] + } + }, + { + "metadata": { + "name": "app-pod-2", + "namespace": "nullplatform", + "labels": { + "nullplatform": "true", + "application_id": "app-123", + "scope_id": "scope-456" + }, + "creationTimestamp": "2024-01-01T11:00:00Z" + }, + "spec": { + "nodeName": "spot-node-1", + "containers": [{ + "name": "main", + "resources": { + "requests": {"cpu": "200m", "memory": "256Mi"}, + "limits": {"cpu": "1000m", "memory": "1Gi"} + } + }] + }, + "status": { + "phase": "Running", + "podIP": "10.0.0.6", + "containerStatuses": [{ + "name": "main", + "ready": true, + "image": "myapp:arm64" + }] + } + } + ] + }' + } + export -f kubectl +} + +teardown() { + unset NAMESPACE_OVERRIDE APPLICATION_ID SCOPE_ID DEPLOYMENT_ID LIMIT + unset -f kubectl +} + +# ============================================================================= +# Full JSON structure validation +# ============================================================================= +@test "instance/list: produces complete JSON with all expected 
fields" { + run bash "$SCRIPT" + + assert_equal "$status" "0" + + local expected_json='{ + "results": [ + { + "id": "app-pod-1", + "selector": { + "nullplatform": "true", + "application_id": "app-123", + "scope_id": "scope-456" + }, + "details": { + "namespace": "nullplatform", + "ip": "10.0.0.5", + "dns": "10.0.0.5.nullplatform.pod.cluster.local", + "cpu": { + "requested": 0.1, + "limit": 0.5 + }, + "memory": { + "requested": "128Mi", + "limit": "512Mi" + }, + "architecture": "x86" + }, + "state": "Running", + "launch_time": "2024-01-01T10:00:00Z", + "spot": false + }, + { + "id": "app-pod-2", + "selector": { + "nullplatform": "true", + "application_id": "app-123", + "scope_id": "scope-456" + }, + "details": { + "namespace": "nullplatform", + "ip": "10.0.0.6", + "dns": "10.0.0.6.nullplatform.pod.cluster.local", + "cpu": { + "requested": 0.2, + "limit": 1 + }, + "memory": { + "requested": "256Mi", + "limit": "1Gi" + }, + "architecture": "arm64" + }, + "state": "Running", + "launch_time": "2024-01-01T11:00:00Z", + "spot": true + } + ] + }' + + assert_json_equal "$output" "$expected_json" "Complete instance list output" +} + +# ============================================================================= +# LIMIT handling +# ============================================================================= +@test "instance/list: respects LIMIT parameter" { + export LIMIT=1 + + run bash "$SCRIPT" + + assert_equal "$status" "0" + local count=$(echo "$output" | jq '.results | length') + assert_equal "$count" "1" + + local id=$(echo "$output" | jq -r '.results[0].id') + assert_equal "$id" "app-pod-1" +} + +# ============================================================================= +# Label selector construction +# ============================================================================= +@test "instance/list: builds label selector with all filters" { + kubectl() { + if [[ "$*" == *"-l 
nullplatform=true,application_id=app-123,scope_id=scope-456,deployment_id=deploy-789"* ]]; then + echo '{"items":[]}' + else + echo "Unexpected label selector: $*" >&2 + return 1 + fi + } + export -f kubectl + + run bash "$SCRIPT" + + assert_equal "$status" "0" +} + +@test "instance/list: builds label selector with only APPLICATION_ID" { + export SCOPE_ID="" + export DEPLOYMENT_ID="" + + kubectl() { + if [[ "$*" == *"-l nullplatform=true,application_id=app-123"* ]]; then + echo '{"items":[]}' + else + echo "Unexpected label selector: $*" >&2 + return 1 + fi + } + export -f kubectl + + run bash "$SCRIPT" + + assert_equal "$status" "0" +} + +@test "instance/list: excludes null filter values from label selector" { + export APPLICATION_ID="null" + export SCOPE_ID="null" + export DEPLOYMENT_ID="null" + + kubectl() { + if [[ "$*" == *"-l nullplatform=true"* ]] && [[ "$*" != *"application_id"* ]]; then + echo '{"items":[]}' + else + echo "Unexpected label selector: $*" >&2 + return 1 + fi + } + export -f kubectl + + run bash "$SCRIPT" + + assert_equal "$status" "0" +} + +# ============================================================================= +# Namespace handling +# ============================================================================= +@test "instance/list: uses default nullplatform namespace" { + kubectl() { + if [[ "$*" == *"-n nullplatform"* ]]; then + echo '{"items":[]}' + else + echo "Expected default namespace: $*" >&2 + return 1 + fi + } + export -f kubectl + + run bash "$SCRIPT" + + assert_equal "$status" "0" +} + +@test "instance/list: uses NAMESPACE_OVERRIDE when set" { + export NAMESPACE_OVERRIDE="custom-namespace" + + kubectl() { + if [[ "$*" == *"-n custom-namespace"* ]]; then + echo '{"items":[]}' + else + echo "Expected namespace override: $*" >&2 + return 1 + fi + } + export -f kubectl + + run bash "$SCRIPT" + + assert_equal "$status" "0" +} + +# ============================================================================= +# Edge cases +# 
============================================================================= +@test "instance/list: handles empty pod list" { + kubectl() { + echo '{"items":[]}' + } + export -f kubectl + + run bash "$SCRIPT" + + assert_equal "$status" "0" + assert_json_equal "$output" '{"results": []}' "Empty pod list" +} + +@test "instance/list: handles pending pod without IP" { + kubectl() { + echo '{ + "items": [{ + "metadata": { + "name": "pending-pod", + "namespace": "nullplatform", + "labels": {"nullplatform": "true"}, + "creationTimestamp": "2024-01-01T10:00:00Z" + }, + "spec": { + "containers": [{ + "name": "main", + "resources": {} + }] + }, + "status": { + "phase": "Pending", + "containerStatuses": [{ + "name": "main", + "image": "myapp:latest" + }] + } + }] + }' + } + export -f kubectl + + run bash "$SCRIPT" + + assert_equal "$status" "0" + + local expected_json='{ + "results": [{ + "id": "pending-pod", + "selector": {"nullplatform": "true"}, + "details": { + "namespace": "nullplatform", + "ip": "pending", + "dns": "pending", + "cpu": {"requested": 0, "limit": 0}, + "memory": {"requested": "0Mi", "limit": "0Mi"}, + "architecture": "x86" + }, + "state": "Pending", + "launch_time": "2024-01-01T10:00:00Z", + "spot": false + }] + }' + + assert_json_equal "$output" "$expected_json" "Pending pod output" +} + +@test "instance/list: handles pod without nodeName (spot defaults to false)" { + kubectl() { + echo '{ + "items": [{ + "metadata": { + "name": "no-node-pod", + "namespace": "nullplatform", + "labels": {}, + "creationTimestamp": "2024-01-01T10:00:00Z" + }, + "spec": { + "containers": [{ + "name": "main", + "resources": { + "requests": {"cpu": "500m", "memory": "256Mi"}, + "limits": {"cpu": "1000m", "memory": "512Mi"} + } + }] + }, + "status": { + "phase": "Pending", + "podIP": "10.0.0.7", + "containerStatuses": [{ + "name": "main", + "image": "myapp:latest" + }] + } + }] + }' + } + export -f kubectl + + run bash "$SCRIPT" + + assert_equal "$status" "0" + local 
spot=$(echo "$output" | jq -r '.results[0].spot') + assert_equal "$spot" "false" +} diff --git a/k8s/log/build_context b/k8s/log/build_context index a1a7697e..cec261a9 100755 --- a/k8s/log/build_context +++ b/k8s/log/build_context @@ -10,6 +10,13 @@ export INSTANCE_ID=$(echo "$NP_ACTION_CONTEXT" | jq -r '.notification.arguments. export LIMIT=$(echo "$NP_ACTION_CONTEXT" | jq -r '.notification.arguments.limit // empty') if [ -z "$APPLICATION_ID" ]; then - echo "Error: Missing required parameters: APPLICATION_ID" >&2 + log error "❌ APPLICATION_ID is missing" + log error "" + log error "💡 Possible causes:" + log error " The log request did not include the required application_id parameter" + log error "" + log error "🔧 How to fix:" + log error " • Ensure the log action includes application_id in the request" + log error "" exit 1 fi diff --git a/k8s/log/log b/k8s/log/log index 2652f0af..e2c38314 100644 --- a/k8s/log/log +++ b/k8s/log/log @@ -6,7 +6,16 @@ ARCH=$(uname -m) KUBE_LOGGER_SCRIPT="$SERVICE_PATH/log/kube-logger-go/bin/$PLATFORM/exec-$ARCH" if [ ! 
-f "$KUBE_LOGGER_SCRIPT" ]; then - echo "Error: kube-logger bash script not found at $KUBE_LOGGER_SCRIPT" >&2 + log error "❌ kube-logger binary not found" + log error "📋 Expected path: $KUBE_LOGGER_SCRIPT" + log error "" + log error "💡 Possible causes:" + log error " The kube-logger binary was not compiled for this platform/architecture" + log error "" + log error "🔧 How to fix:" + log error " • Verify the binary exists for $PLATFORM/$ARCH" + log error " • Rebuild the kube-logger binary using 'make build'" + log error "" exit 1 fi @@ -42,13 +51,13 @@ fi # Add optional start time (convert from milliseconds to ISO format) if [ -n "$START_TIME" ]; then SECONDS=$(echo "$START_TIME/1000" | bc) - + # Handle different date command versions for Alpine/busybox # Try different approaches for Alpine busybox date if ISO_DATE=$(date -u -d "@$SECONDS" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null); then # GNU date worked : - elif ISO_DATE=$(date -u -r "$SECONDS" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null); then + elif ISO_DATE=$(date -u -r "$SECONDS" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null); then # BSD date worked : else @@ -59,14 +68,14 @@ if [ -n "$START_TIME" ]; then # For a more robust solution, we would need a full date calculation print strftime("%Y-%m-%dT%H:%M:%SZ", ts) }' 2>/dev/null) - + # If awk strftime failed, use a different approach if [ -z "$ISO_DATE" ] || [ "$ISO_DATE" = "" ]; then ISO_DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ") fi fi - + CMD="$CMD --start-time $ISO_DATE" fi -eval "$CMD" \ No newline at end of file +eval "$CMD" diff --git a/k8s/log/tests/build_context.bats b/k8s/log/tests/build_context.bats new file mode 100644 index 00000000..7dc16246 --- /dev/null +++ b/k8s/log/tests/build_context.bats @@ -0,0 +1,138 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for log/build_context - log parameter extraction +# ============================================================================= + +setup() { + export 
PROJECT_ROOT="$(cd "$BATS_TEST_DIRNAME/../../.." && pwd)" + source "$PROJECT_ROOT/testing/assertions.sh" + + export SCRIPT="$BATS_TEST_DIRNAME/../build_context" + + log() { if [ "$1" = "error" ]; then echo "$2" >&2; else echo "$2"; fi; } + export -f log + + export NP_ACTION_CONTEXT='{ + "notification": { + "arguments": { + "scope_id": "scope-123", + "application_id": "app-456", + "deployment_id": "deploy-789", + "next_page_token": "token-abc", + "start_time": "1704067200000", + "filter_pattern": "ERROR", + "instance_id": "pod-xyz", + "limit": "100" + } + } + }' +} + +teardown() { + unset NP_ACTION_CONTEXT SCOPE_ID APPLICATION_ID DEPLOYMENT_ID + unset NEXT_PAGE_TOKEN START_TIME FILTER_PATTERN INSTANCE_ID LIMIT +} + +run_build_context() { + source "$SCRIPT" +} + +# ============================================================================= +# Success flow +# ============================================================================= +@test "log/build_context: exports all parameters correctly" { + run_build_context + + assert_equal "$SCOPE_ID" "scope-123" + assert_equal "$APPLICATION_ID" "app-456" + assert_equal "$DEPLOYMENT_ID" "deploy-789" + assert_equal "$NEXT_PAGE_TOKEN" "token-abc" + assert_equal "$START_TIME" "1704067200000" + assert_equal "$FILTER_PATTERN" "ERROR" + assert_equal "$INSTANCE_ID" "pod-xyz" + assert_equal "$LIMIT" "100" +} + +@test "log/build_context: produces no stdout output" { + run bash -c 'source "$SCRIPT"' + + assert_equal "$status" "0" + assert_equal "$output" "" +} + +# ============================================================================= +# deploy_id fallback +# ============================================================================= +@test "log/build_context: extracts DEPLOYMENT_ID from deploy_id fallback" { + export NP_ACTION_CONTEXT='{ + "notification": { + "arguments": { + "application_id": "app-456", + "deploy_id": "deploy-fallback" + } + } + }' + + run_build_context + + assert_equal "$DEPLOYMENT_ID" "deploy-fallback" 
+} + +# ============================================================================= +# Optional arguments +# ============================================================================= +@test "log/build_context: handles missing optional arguments" { + export NP_ACTION_CONTEXT='{ + "notification": { + "arguments": { + "application_id": "app-456" + } + } + }' + + run_build_context + + assert_equal "$APPLICATION_ID" "app-456" + assert_empty "$SCOPE_ID" + assert_empty "$DEPLOYMENT_ID" + assert_empty "$NEXT_PAGE_TOKEN" + assert_empty "$START_TIME" + assert_empty "$FILTER_PATTERN" + assert_empty "$INSTANCE_ID" + assert_empty "$LIMIT" +} + +# ============================================================================= +# Validation errors +# ============================================================================= +@test "log/build_context: fails with full error block when APPLICATION_ID is missing" { + export NP_ACTION_CONTEXT='{ + "notification": { + "arguments": { + "scope_id": "scope-123" + } + } + }' + + run bash "$BATS_TEST_DIRNAME/../build_context" + + assert_equal "$status" "1" + assert_contains "$output" "❌ APPLICATION_ID is missing" + assert_contains "$output" "💡 Possible causes:" + assert_contains "$output" "application_id parameter" + assert_contains "$output" "🔧 How to fix:" + assert_contains "$output" "application_id in the request" +} + +@test "log/build_context: fails with empty notification arguments" { + export NP_ACTION_CONTEXT='{ + "notification": { + "arguments": {} + } + }' + + run bash "$BATS_TEST_DIRNAME/../build_context" + + assert_equal "$status" "1" + assert_contains "$output" "❌ APPLICATION_ID is missing" +} diff --git a/k8s/log/workflows/log.yaml b/k8s/log/workflows/log.yaml index 615fa6d6..a0b04249 100644 --- a/k8s/log/workflows/log.yaml +++ b/k8s/log/workflows/log.yaml @@ -1,4 +1,13 @@ steps: + - name: load logging + type: script + file: "$SERVICE_PATH/logging" + output: + - name: log + type: function + parameters: + level: string + 
message: string - name: build context type: script file: "$SERVICE_PATH/log/build_context" diff --git a/k8s/metric/build_context b/k8s/metric/build_context index 8328aad8..1e9e1ed9 100755 --- a/k8s/metric/build_context +++ b/k8s/metric/build_context @@ -14,7 +14,15 @@ else fi if [[ -z "$PROM_URL" ]]; then - echo "There is no prometheus provider configured. Please configure prometheus in Platform settings and try again." >&2 + log error "❌ No Prometheus provider configured" + log error "" + log error "💡 Possible causes:" + log error " A metrics provider has not been linked to this scope or its parent entities" + log error "" + log error "🔧 How to fix:" + log error " • Configure a Prometheus provider in Platform Settings" + log error " • Link the metrics provider to the scope, application, or namespace" + log error "" exit 1 fi @@ -32,4 +40,4 @@ fi export PROM_URL export K8S_NAMESPACE -export SCOPE_ID \ No newline at end of file +export SCOPE_ID diff --git a/k8s/metric/tests/build_context.bats b/k8s/metric/tests/build_context.bats new file mode 100644 index 00000000..82e9c4d5 --- /dev/null +++ b/k8s/metric/tests/build_context.bats @@ -0,0 +1,162 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for metric/build_context - metric parameter extraction +# ============================================================================= + +setup() { + export PROJECT_ROOT="$(cd "$BATS_TEST_DIRNAME/../../.." 
&& pwd)" + source "$PROJECT_ROOT/testing/assertions.sh" + + export SCRIPT="$BATS_TEST_DIRNAME/../build_context" + + log() { if [ "$1" = "error" ]; then echo "$2" >&2; else echo "$2"; fi; } + export -f log + + export K8S_NAMESPACE="nullplatform" + + np() { + case "$1" in + scope) + echo '{"nrn": "nrn:org=1:account=2:ns=3:app=4"}' + ;; + provider) + echo '{ + "results": [{ + "attributes": { + "server": { + "url": "http://prometheus:9090" + } + } + }] + }' + ;; + esac + } + export -f np + + export CONTEXT='{ + "arguments": { + "scope_id": "scope-123", + "application_id": "app-456", + "deployment_id": "deploy-789", + "metric": "system.cpu_usage_percentage", + "start_time": "2024-01-01T00:00:00Z", + "end_time": "2024-01-01T01:00:00Z", + "period": "60", + "group_by": ["instance_id"] + } + }' +} + +teardown() { + unset CONTEXT K8S_NAMESPACE PROMETHEUS_URL PROM_URL + unset SCOPE_ID APPLICATION_ID DEPLOYMENT_ID METRIC_NAME + unset START_TIME END_TIME PERIOD GROUP_BY + unset -f np +} + +run_build_context() { + source "$SCRIPT" +} + +# ============================================================================= +# Success flow +# ============================================================================= +@test "metric/build_context: exports all parameters correctly" { + run_build_context + + assert_equal "$SCOPE_ID" "scope-123" + assert_equal "$APPLICATION_ID" "app-456" + assert_equal "$DEPLOYMENT_ID" "deploy-789" + assert_equal "$METRIC_NAME" "system.cpu_usage_percentage" + assert_equal "$START_TIME" "2024-01-01T00:00:00Z" + assert_equal "$END_TIME" "2024-01-01T01:00:00Z" + assert_equal "$PERIOD" "60" + assert_equal "$K8S_NAMESPACE" "nullplatform" +} + +@test "metric/build_context: produces no stdout output" { + run bash "$BATS_TEST_DIRNAME/../build_context" + + assert_equal "$status" "0" + assert_equal "$output" "" +} + +# ============================================================================= +# Prometheus URL resolution +# 
============================================================================= +@test "metric/build_context: uses PROMETHEUS_URL env var when set" { + export PROMETHEUS_URL="http://custom-prometheus:9090" + + run_build_context + + assert_equal "$PROM_URL" "http://custom-prometheus:9090" +} + +@test "metric/build_context: fetches prometheus URL from provider when not set" { + unset PROMETHEUS_URL + + run_build_context + + assert_equal "$PROM_URL" "http://prometheus:9090" +} + +# ============================================================================= +# Argument handling +# ============================================================================= +@test "metric/build_context: handles array arguments (joins with comma)" { + run_build_context + + assert_equal "$GROUP_BY" "instance_id" +} + +@test "metric/build_context: handles multiple array values" { + export CONTEXT='{ + "arguments": { + "scope_id": "scope-123", + "group_by": ["scope_id", "instance_id"] + } + }' + + run_build_context + + assert_equal "$GROUP_BY" "scope_id,instance_id" +} + +@test "metric/build_context: handles minimal arguments" { + export CONTEXT='{ + "arguments": { + "scope_id": "scope-123" + } + }' + + run_build_context + + assert_equal "$SCOPE_ID" "scope-123" + assert_not_empty "$PROM_URL" +} + +# ============================================================================= +# Validation errors +# ============================================================================= +@test "metric/build_context: fails with full error block when prometheus not found" { + unset PROMETHEUS_URL + + np() { + case "$1" in + scope) echo '{"nrn": "nrn:org=1:account=2:ns=3:app=4"}' ;; + provider) echo '{"results": []}' ;; + esac + } + export -f np + + run bash "$BATS_TEST_DIRNAME/../build_context" + + assert_equal "$status" "1" + assert_contains "$output" "❌ No Prometheus provider configured" + assert_contains "$output" "💡 Possible causes:" + assert_contains "$output" "metrics provider has not been 
linked" + assert_contains "$output" "🔧 How to fix:" + assert_contains "$output" "Configure a Prometheus provider" + assert_contains "$output" "Link the metrics provider" +} diff --git a/k8s/metric/tests/list.bats b/k8s/metric/tests/list.bats new file mode 100644 index 00000000..e631a36a --- /dev/null +++ b/k8s/metric/tests/list.bats @@ -0,0 +1,74 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for metric/list - list available metrics +# ============================================================================= + +setup() { + export PROJECT_ROOT="$(cd "$BATS_TEST_DIRNAME/../../.." && pwd)" + source "$PROJECT_ROOT/testing/assertions.sh" +} + +# ============================================================================= +# Full JSON structure validation +# ============================================================================= +@test "metric/list: produces complete JSON with all expected metrics" { + run bash "$BATS_TEST_DIRNAME/../list" + + assert_equal "$status" "0" + + local expected_json='{ + "results": [ + { + "name": "http.rpm", + "title": "Throughput", + "unit": "rpm", + "available_filters": ["scope_id", "instance_id"], + "available_group_by": ["instance_id"] + }, + { + "name": "http.response_time", + "title": "Response time", + "unit": "ms", + "available_filters": ["scope_id", "instance_id"], + "available_group_by": ["instance_id"] + }, + { + "name": "http.error_rate", + "title": "Error rate", + "unit": "%", + "available_filters": ["scope_id", "instance_id"], + "available_group_by": ["instance_id"] + }, + { + "name": "system.cpu_usage_percentage", + "title": "CPU usage", + "unit": "%", + "available_filters": ["scope_id", "instance_id"], + "available_group_by": ["instance_id"] + }, + { + "name": "system.memory_usage_percentage", + "title": "Memory usage percentage", + "unit": "%", + "available_filters": ["scope_id", "instance_id"], + "available_group_by": ["scope_id", 
"instance_id"] + }, + { + "name": "system.used_memory_kb", + "title": "Memory usage in kb", + "unit": "kb", + "available_filters": ["scope_id", "instance_id"], + "available_group_by": ["scope_id", "instance_id"] + }, + { + "name": "http.healthcheck_count", + "title": "Healthcheck", + "unit": "check", + "available_filters": ["scope_id", "instance_id"], + "available_group_by": ["instance_id"] + } + ] + }' + + assert_json_equal "$output" "$expected_json" "Complete metric list output" +} diff --git a/k8s/metric/tests/metric.bats b/k8s/metric/tests/metric.bats new file mode 100644 index 00000000..1bba6c7a --- /dev/null +++ b/k8s/metric/tests/metric.bats @@ -0,0 +1,358 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for metric/metric - Prometheus metric queries +# ============================================================================= + +setup() { + # Get project root directory + export PROJECT_ROOT="$(cd "$BATS_TEST_DIRNAME/../../.." 
&& pwd)" + + # Source assertions + source "$PROJECT_ROOT/testing/assertions.sh" + + # Required environment variables + export PROM_URL="http://prometheus:9090" + export APPLICATION_ID="app-123" + export SCOPE_ID="scope-456" + export DEPLOYMENT_ID="deploy-789" + export METRIC_NAME="system.cpu_usage_percentage" + export START_TIME="2024-01-01T00:00:00Z" + export END_TIME="2024-01-01T01:00:00Z" + export PERIOD="60" + export GROUP_BY="" + + # Source the metric script to get functions + # We need to extract functions without running the main logic + eval "$(sed -n '/^get_metric_config()/,/^}/p' "$BATS_TEST_DIRNAME/../metric")" + eval "$(sed -n '/^build_filters()/,/^}/p' "$BATS_TEST_DIRNAME/../metric")" + eval "$(sed -n '/^build_query()/,/^}/p' "$BATS_TEST_DIRNAME/../metric")" + eval "$(sed -n '/^urlencode()/,/^}/p' "$BATS_TEST_DIRNAME/../metric")" + + # Mock curl for Prometheus queries + curl() { + echo '{ + "status": "success", + "data": { + "resultType": "matrix", + "result": [{ + "metric": {"scope_id": "scope-456"}, + "values": [[1704067200, "0.5"], [1704067260, "0.6"]] + }] + } + }' + } + export -f curl +} + +teardown() { + unset PROM_URL + unset APPLICATION_ID + unset SCOPE_ID + unset DEPLOYMENT_ID + unset METRIC_NAME + unset START_TIME + unset END_TIME + unset PERIOD + unset GROUP_BY + unset TIME_RANGE + unset INTERVAL + unset -f curl + unset -f get_metric_config + unset -f build_filters + unset -f build_query + unset -f urlencode +} + +# ============================================================================= +# get_metric_config tests +# ============================================================================= +@test "metric: get_metric_config returns gauge percent for cpu_usage_percentage" { + result=$(get_metric_config) + + assert_equal "$(echo $result | cut -d' ' -f1)" "gauge" + assert_equal "$(echo $result | cut -d' ' -f2)" "percent" +} + +@test "metric: get_metric_config returns gauge seconds for response_time" { + export 
METRIC_NAME="http.response_time" + + result=$(get_metric_config) + + assert_equal "$(echo $result | cut -d' ' -f2)" "seconds" +} + +@test "metric: get_metric_config returns gauge count_per_minute for rpm" { + export METRIC_NAME="http.rpm" + + result=$(get_metric_config) + + assert_equal "$(echo $result | cut -d' ' -f2)" "count_per_minute" +} + +@test "metric: get_metric_config returns gauge percent for error_rate" { + export METRIC_NAME="http.error_rate" + + result=$(get_metric_config) + + assert_equal "$(echo $result | cut -d' ' -f2)" "percent" +} + +@test "metric: get_metric_config returns gauge percent for memory_usage_percentage" { + export METRIC_NAME="system.memory_usage_percentage" + + result=$(get_metric_config) + + assert_equal "$(echo $result | cut -d' ' -f2)" "percent" +} + +@test "metric: get_metric_config returns gauge kilobytes for used_memory_kb" { + export METRIC_NAME="system.used_memory_kb" + + result=$(get_metric_config) + + assert_equal "$(echo $result | cut -d' ' -f2)" "kilobytes" +} + +@test "metric: get_metric_config returns gauge count for cronjob.execution_count" { + export METRIC_NAME="cronjob.execution_count" + + result=$(get_metric_config) + + assert_equal "$(echo $result | cut -d' ' -f2)" "count" +} + +@test "metric: get_metric_config returns gauge unknown for unrecognized metric" { + export METRIC_NAME="unknown.metric" + + result=$(get_metric_config) + + assert_equal "$(echo $result | cut -d' ' -f2)" "unknown" +} + +# ============================================================================= +# build_filters tests +# ============================================================================= +@test "metric: build_filters includes application_id" { + result=$(build_filters) + + assert_contains "$result" 'application_id="app-123"' +} + +@test "metric: build_filters includes scope_id" { + result=$(build_filters) + + assert_contains "$result" 'scope_id="scope-456"' +} + +@test "metric: build_filters includes deployment_id" { + 
result=$(build_filters)
+
+  assert_contains "$result" 'deployment_id="deploy-789"'
+}
+
+@test "metric: build_filters excludes null deployment_id" {
+  export DEPLOYMENT_ID="null"
+
+  result=$(build_filters)
+
+  [[ "$result" != *"deployment_id"* ]]
+}
+
+@test "metric: build_filters handles empty deployment_id" {
+  export DEPLOYMENT_ID=""
+
+  result=$(build_filters)
+
+  [[ "$result" != *"deployment_id"* ]]
+}
+
+@test "metric: build_filters builds comma-separated filters" {
+  result=$(build_filters)
+
+  # Should have commas between filters
+  assert_contains "$result" ","
+}
+
+# =============================================================================
+# build_query tests
+# =============================================================================
+@test "metric: build_query generates cpu_usage query" {
+  filters=$(build_filters)
+  query=$(build_query "system.cpu_usage_percentage" "$filters" "5m")
+
+  assert_contains "$query" "nullplatform_system_cpu_usage_percentage"
+  assert_contains "$query" "avg("
+}
+
+@test "metric: build_query generates memory_usage query" {
+  filters=$(build_filters)
+  query=$(build_query "system.memory_usage_percentage" "$filters" "5m")
+
+  assert_contains "$query" "nullplatform_system_memory_usage_percentage"
+}
+
+@test "metric: build_query generates rpm query with rate" {
+  filters=$(build_filters)
+  query=$(build_query "http.rpm" "$filters" "5m")
+
+  assert_contains "$query" "rate("
+  assert_contains "$query" "* 60"
+}
+
+@test "metric: build_query generates error_rate query" {
+  filters=$(build_filters)
+  query=$(build_query "http.error_rate" "$filters" "5m")
+
+  assert_contains "$query" 'quality="OK (2XX, 3XX)"'
+  assert_contains "$query" "*100"
+}
+
+@test "metric: build_query generates response_time query" {
+  filters=$(build_filters)
+  query=$(build_query "http.response_time" "$filters" "5m")
+
+  assert_contains "$query" "idelta("
+  assert_contains "$query" "nullplatform_http_response_time"
+}
+
+@test "metric: build_query generates healthcheck_count query" {
+  filters=$(build_filters)
+  query=$(build_query "http.healthcheck_count" "$filters" "5m")
+
+  assert_contains "$query" 'is_healthcheck="yes"'
+}
+
+@test "metric: build_query generates healthcheck_fail query" {
+  filters=$(build_filters)
+  query=$(build_query "http.healthcheck_fail" "$filters" "5m")
+
+  assert_contains "$query" 'is_healthcheck="yes"'
+  assert_contains "$query" 'quality="OK (2XX, 3XX)"'
+}
+
+@test "metric: build_query generates cronjob execution_count query" {
+  filters=$(build_filters)
+  query=$(build_query "cronjob.execution_count" "$filters" "5m")
+
+  assert_contains "$query" "kube_job_status_succeeded"
+  assert_contains "$query" "kube_job_status_failed"
+  assert_contains "$query" "job-${SCOPE_ID}"
+}
+
+@test "metric: build_query generates cronjob success_count query" {
+  filters=$(build_filters)
+  query=$(build_query "cronjob.success_count" "$filters" "5m")
+
+  assert_contains "$query" "kube_job_status_succeeded"
+}
+
+@test "metric: build_query generates cronjob failure_count query" {
+  filters=$(build_filters)
+  query=$(build_query "cronjob.failure_count" "$filters" "5m")
+
+  assert_contains "$query" "kube_job_status_failed"
+}
+
+@test "metric: build_query includes group_by when set" {
+  export GROUP_BY="instance_id"
+  filters=$(build_filters)
+  query=$(build_query "system.cpu_usage_percentage" "$filters" "5m")
+
+  assert_contains "$query" "by (instance_id)"
+}
+
+@test "metric: build_query omits group_by when empty" {
+  export GROUP_BY=""
+  filters=$(build_filters)
+  query=$(build_query "system.cpu_usage_percentage" "$filters" "5m")
+
+  [[ "$query" != *"by ("* ]]
+}
+
+@test "metric: build_query handles empty array group_by" {
+  export GROUP_BY="[]"
+  filters=$(build_filters)
+  query=$(build_query "system.cpu_usage_percentage" "$filters" "5m")
+
+  [[ "$query" != *"by ("* ]]
+}
+
+@test "metric: build_query returns default query for unknown metric" {
+  filters=$(build_filters)
+  query=$(build_query "unknown.metric" "$filters" "5m")
+
+  assert_contains "$query" "up{"
+}
+
+# =============================================================================
+# urlencode tests
+# =============================================================================
+@test "metric: urlencode encodes special characters" {
+  result=$(urlencode "foo bar")
+
+  assert_equal "$result" "foo%20bar"
+}
+
+@test "metric: urlencode preserves alphanumeric characters" {
+  result=$(urlencode "abc123")
+
+  assert_equal "$result" "abc123"
+}
+
+@test "metric: urlencode encodes equals sign" {
+  result=$(urlencode "key=value")
+
+  assert_contains "$result" "%3d"
+}
+
+# =============================================================================
+# Full script integration tests
+# =============================================================================
+@test "metric: returns empty results when METRIC_NAME is empty" {
+  unset METRIC_NAME
+  export METRIC_NAME=""
+
+  run bash "$BATS_TEST_DIRNAME/../metric"
+
+  [ "$status" -eq 1 ]
+  result=$(echo "$output" | jq -r '.results')
+  assert_equal "$result" "[]"
+}
+
+@test "metric: returns empty results when APPLICATION_ID is empty" {
+  unset APPLICATION_ID
+  export APPLICATION_ID=""
+
+  run bash "$BATS_TEST_DIRNAME/../metric"
+
+  [ "$status" -eq 1 ]
+  result=$(echo "$output" | jq -r '.results')
+  assert_equal "$result" "[]"
+}
+
+@test "metric: returns error when PROM_URL is empty" {
+  unset PROM_URL
+  export PROM_URL=""
+
+  run bash "$BATS_TEST_DIRNAME/../metric"
+
+  [ "$status" -eq 1 ]
+  assert_contains "$output" "PROM_URL is required"
+}
+
+@test "metric: returns valid JSON response structure" {
+  run bash "$BATS_TEST_DIRNAME/../metric"
+
+  [ "$status" -eq 0 ]
+  # Validate JSON structure
+  metric=$(echo "$output" | jq -r '.metric')
+  type=$(echo "$output" | jq -r '.type')
+  period=$(echo "$output" | jq -r '.period_in_seconds')
+  unit=$(echo "$output" | jq -r '.unit')
+  results=$(echo "$output" | jq -r '.results')
+
+  assert_equal "$metric" "system.cpu_usage_percentage"
+  assert_equal "$type" "gauge"
+  assert_not_empty "$unit"
+  assert_not_empty "$results"
+}
diff --git a/k8s/metric/workflows/metric.yaml b/k8s/metric/workflows/metric.yaml
index d7b668c2..b2fcfa50 100644
--- a/k8s/metric/workflows/metric.yaml
+++ b/k8s/metric/workflows/metric.yaml
@@ -1,4 +1,13 @@
 steps:
+  - name: load logging
+    type: script
+    file: "$SERVICE_PATH/logging"
+    output:
+      - name: log
+        type: function
+        parameters:
+          level: string
+          message: string
   - name: build context
     type: script
     file: "$SERVICE_PATH/metric/build_context"