diff --git a/Tiltfile b/Tiltfile index fa4e4e6e..4888eb7e 100644 --- a/Tiltfile +++ b/Tiltfile @@ -261,6 +261,7 @@ third_party_operator_flags = [ if settings.get('openshiftSCC'): third_party_operator_flags += [ '--set=altinity-clickhouse-operator.crdHook.enabled=false', + '--values=./deploy/operator/profiles/openshift.yaml' ] if settings.get("installTelemetry"): diff --git a/config/openshift-dev/openshift-security.yaml b/config/openshift-dev/openshift-security.yaml index c75c3877..d2d93ae3 100644 --- a/config/openshift-dev/openshift-security.yaml +++ b/config/openshift-dev/openshift-security.yaml @@ -12,3 +12,8 @@ capabilities: drop: - ALL +- op: add + path: /spec/template/spec/containers/0/env/- + value: + name: KAFKA_FSGROUP + value: "0" diff --git a/deploy/operator/profiles/openshift.yaml b/deploy/operator/profiles/openshift.yaml new file mode 100644 index 00000000..72f2106b --- /dev/null +++ b/deploy/operator/profiles/openshift.yaml @@ -0,0 +1,12 @@ +grafana-operator: + isOpenShift: true + +minio-operator: + operator: + securityContext: + runAsUser: null + runAsGroup: null + fsGroup: null + containerSecurityContext: + runAsUser: null + runAsGroup: null diff --git a/hack/scripts/setup_crc.sh b/hack/scripts/setup_crc.sh index 94efd064..721991cf 100755 --- a/hack/scripts/setup_crc.sh +++ b/hack/scripts/setup_crc.sh @@ -7,8 +7,9 @@ command -v oc >/dev/null 2>&1 || { echo "Error: oc is required but not installed command -v docker >/dev/null 2>&1 || { echo "Error: docker is required but not installed." >&2; exit 1; } echo "Configuring CRC resources..." 
-crc config set memory 16384 -crc config set disk-size 80 +crc config set cpus 6 >/dev/null +crc config set memory 16384 >/dev/null +crc config set disk-size 80 >/dev/null CRC_STATUS=$(crc status --output json 2>/dev/null | python3 -c "import sys,json; print(json.load(sys.stdin).get('crcStatus','Unknown'))" 2>/dev/null || echo "Unknown") if [[ "$CRC_STATUS" != "Running" ]]; then @@ -74,6 +75,8 @@ oc new-project operator-system 2>/dev/null || oc project operator-system 2>/dev/ echo "Creating wandb-operator namespace..." oc new-project wandb-operator 2>/dev/null || oc project wandb-operator 2>/dev/null || true +kubectl config use-context crc-admin + echo "" echo "Done. CRC is ready for Tilt." echo "" diff --git a/internal/controller/infra/managed/kafka/strimzi/spec.go b/internal/controller/infra/managed/kafka/strimzi/spec.go index 65c60cd2..8bf0aed3 100644 --- a/internal/controller/infra/managed/kafka/strimzi/spec.go +++ b/internal/controller/infra/managed/kafka/strimzi/spec.go @@ -3,12 +3,14 @@ package strimzi import ( "context" "fmt" + "os" "strconv" apiv2 "github.com/wandb/operator/api/v2" "github.com/wandb/operator/internal/controller/common" "github.com/wandb/operator/internal/logx" - "github.com/wandb/operator/pkg/vendored/strimzi-kafka/v1" + v1 "github.com/wandb/operator/pkg/vendored/strimzi-kafka/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -41,6 +43,21 @@ func createKafkaMetricsConfig(telemetry apiv2.Telemetry) *v1.MetricsConfig { } } +// kafkaPodSecurityContext returns a PodSecurityContext with FSGroup set if the +// KAFKA_FSGROUP env var on the operator holds a valid integer. Returns nil when +// the var is unset or unparsable, letting the platform apply its own defaults. 
+func kafkaPodSecurityContext() *corev1.PodSecurityContext { + val, ok := os.LookupEnv("KAFKA_FSGROUP") + if !ok { + return nil + } + fsGroup, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return nil + } + return &corev1.PodSecurityContext{FSGroup: &fsGroup} +} + // ToKafkaVendorSpec converts a KafkaSpec to a Kafka CR. // This function translates the high-level Kafka spec into the vendor-specific // Kafka format used by the Strimzi operator. @@ -187,6 +204,7 @@ func ToKafkaNodePoolVendorSpec( Metadata: &v1.MetadataTemplate{ Labels: BuildWandbKafkaLabels(wandb), }, + SecurityContext: kafkaPodSecurityContext(), }, PersistentVolumeClaim: &v1.ResourceTemplate{ Metadata: &v1.MetadataTemplate{