From c80f3fc6ee51d5a533a5a2548309bd58c173c3da Mon Sep 17 00:00:00 2001 From: Lucas Benedito Date: Thu, 26 Mar 2026 13:22:27 +0000 Subject: [PATCH 1/2] Add standardized Makefile includes and developer documentation Introduce modular Makefile system (common.mk + operator.mk) for consistent dev workflows. Standardize CONTRIBUTING.md and docs/development.md to follow community conventions with clear separation: contributing guidelines for process, development guide for technical setup. - Add common.mk with shared dev workflow targets (make up/down) - Add operator.mk with EDA-specific variables and targets - Restructure CONTRIBUTING.md: process, testing requirements, community links - Expand docs/development.md: customization options table, teardown options, bundle generation via make targets - Simplify README.md contributing section Assisted-by: Claude Signed-off-by: Lucas Benedito --- CONTRIBUTING.md | 79 +++--- Makefile | 94 +++---- README.md | 2 +- config/default/kustomization.yaml | 2 +- dev/eda-cr/eda-openshift-cr.yml | 4 +- docs/development.md | 162 +++++++++-- down.sh | 29 -- makefiles/common.mk | 439 ++++++++++++++++++++++++++++++ makefiles/operator.mk | 64 +++++ up.sh | 131 --------- 10 files changed, 722 insertions(+), 284 deletions(-) delete mode 100755 down.sh create mode 100644 makefiles/common.mk create mode 100644 makefiles/operator.mk delete mode 100755 up.sh diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c9671d8d..42c0a179 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,66 +1,53 @@ -# Contributing +# Contributing to EDA Server Operator +Hi there! We're excited to have you as a contributor. -## Development Environment +Have questions about this document or anything not covered here? Please file an issue at [https://github.com/ansible/eda-server-operator/issues](https://github.com/ansible/eda-server-operator/issues). -There are a couple ways to make and test changes to an Ansible operator. 
The easiest way is to build and deploy the operator from your branch using the make targets. This is closed to how the operator will be used, and is what is documented below. However, it may be useful to run the EDA Operator roles directly on your local machine for faster iteration. This involves a bit more set up, and is described in the [Debugging docs](./docs/debugging.md). +## Things to know prior to submitting code -First, you need to have a k8s cluster up. If you don't already have a k8s cluster, you can use minikube to start a lightweight k8s cluster locally by following these [minikube test cluster docs](./docs/minikube-test-cluster.md). +- All code submissions are done through pull requests against the `main` branch. +- All PRs must have a single commit. Make sure to `squash` any changes into a single commit. +- Take care to make sure no merge commits are in the submission, and use `git rebase` vs `git merge` for this reason. +- If collaborating with someone else on the same branch, consider using `--force-with-lease` instead of `--force`. This will prevent you from accidentally overwriting commits pushed by someone else. For more information, see [git push --force-with-lease](https://git-scm.com/docs/git-push#git-push---force-with-leaseltrefnamegt). +- We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). If you have questions, or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com). +## Setting up your development environment +See [docs/development.md](docs/development.md) for prerequisites, build/deploy instructions, and available Makefile targets. -### Build Operator Image +For faster iteration, you can also run the EDA Operator roles directly on your local machine. See the [Debugging docs](docs/debugging.md) for details. 
-Clone the eda-server-operator +## Submitting your work +1. From your fork's `main` branch, create a new branch to stage your changes. +```sh +git checkout -b <new-branch-name> ``` -git clone git@github.com:ansible/eda-server-operator.git +2. Make your changes. +3. Test your changes (see [Testing](#testing) below). +4. Commit your changes. +```sh +git add <file> +git commit -m "My message here" ``` +5. Create your [pull request](https://github.com/ansible/eda-server-operator/pulls). -Create an image repo in your user called `eda-server-operator` on [quay.io](https://quay.io) or your preferred image registry. +> **Note**: If you have multiple commits, make sure to `squash` them into a single commit before submitting. -Build & push the operator image ## Testing - -``` -export QUAY_USER=username -export TAG=feature-branch -make docker-build docker-push IMG=quay.io/$QUAY_USER/eda-server-operator:$TAG -``` - - -### Deploy EDA Operator +All changes must be tested before submission: +- **Linting** (required for all PRs): `make lint` +- See the [Testing section in docs/development.md](docs/development.md#testing) for details on running tests locally. -1. Log in to your K8s or Openshift cluster. ## Reporting Issues - -``` -kubectl login -``` +We welcome your feedback, and encourage you to file an issue when you run into a problem at [https://github.com/ansible/eda-server-operator/issues](https://github.com/ansible/eda-server-operator/issues). -2. Run the `make deploy` target ## Getting Help - -``` -NAMESPACE=eda IMG=quay.io/$QUAY_USER/eda-server-operator:$TAG make deploy -``` -> **Note** The `latest` tag on the quay.io/ansible/eda-server-operator repo is the latest _released_ (tagged) version and the `main` tag is built from the HEAD of the `main` branch. To deploy with the latest code in `main` branch, check out the main branch, and use `IMG=quay.io/ansible/eda-server-operator:main` instead. 
+### Forum - -### Create an EDA CR - -Create a yaml file defining the EDA custom resource - -```yaml -# eda.yaml -apiVersion: eda.ansible.com/v1alpha1 -kind: EDA -metadata: - name: my-eda -spec: - automation_server_url: https://awx-host -``` - -3. Now apply this yaml - -```bash -$ kubectl apply -f eda.yaml -``` +Join the [Ansible Forum](https://forum.ansible.com) for questions, help, and development discussions. Search for posts tagged with [`eda`](https://forum.ansible.com/tag/eda) or start a new discussion. diff --git a/Makefile b/Makefile index 56252f2a..3988aae2 100644 --- a/Makefile +++ b/Makefile @@ -3,10 +3,7 @@ # To re-generate a bundle for another specific version without changing the standard setup, you can: # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) -VERSION ?= 0.0.1 - -# Default ENGINE for building the operator (default docker) -ENGINE ?= docker +# VERSION ?= 0.0.1 # Set in operator.mk # CHANNELS define the bundle channels used in the bundle. # Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") @@ -31,8 +28,8 @@ BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) # This variable is used to construct full image tags for bundle and catalog images. # # For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both -# ansible.com/eda-server-operator-bundle:$VERSION and ansible.com/eda-server-operator-catalog:$VERSION. -IMAGE_TAG_BASE ?= quay.io/ansible/eda-server-operator +# example.com/temp-operator-bundle:$VERSION and example.com/temp-operator-catalog:$VERSION. +# IMAGE_TAG_BASE ?= quay.io// # Set in operator.mk # BUNDLE_IMG defines the image:tag used for the bundle. # You can use it as an arg. 
(E.g make bundle-build BUNDLE_IMG=/:) @@ -49,6 +46,11 @@ ifeq ($(USE_IMAGE_DIGESTS), true) BUNDLE_GEN_FLAGS += --use-image-digests endif +# Set the Operator SDK version to use. By default, what is installed on the system is used. +# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. +OPERATOR_SDK_VERSION ?= v1.40.0 +CONTAINER_TOOL ?= podman + # Image URL to use all building/pushing image targets IMG ?= $(IMAGE_TAG_BASE):$(VERSION) @@ -81,11 +83,11 @@ run: ansible-operator ## Run against the configured Kubernetes cluster in ~/.kub .PHONY: docker-build docker-build: ## Build docker image with the manager. - $(ENGINE) build $(BUILD_ARGS) -t ${IMG} . + docker build $(BUILD_ARGS) -t ${IMG} . .PHONY: docker-push docker-push: ## Push docker image with the manager. - $(ENGINE) push ${IMG} + docker push ${IMG} # PLATFORMS defines the target platforms for the manager image be build to provide support to multiple # architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: @@ -93,7 +95,6 @@ docker-push: ## Push docker image with the manager. # - have enable BuildKit, More info: https://docs.docker.com/develop/develop-images/build_enhancements/ # - be able to push the image for your registry (i.e. if you do not inform a valid value via IMG=> than the export will fail) # To properly provided solutions that supports more than one platform you should use this option. -PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le .PHONY: docker-buildx docker-buildx: ## Build and push docker image for the manager for cross-platform support - docker buildx create --name project-v3-builder @@ -101,31 +102,31 @@ docker-buildx: ## Build and push docker image for the manager for cross-platform - docker buildx build --push $(BUILD_ARGS) --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile . 
- docker buildx rm project-v3-builder -.PHONY: podman-buildx -podman-buildx: ## Build and push podman image for the manager for cross-platform support - podman build --platform=$(PLATFORMS) $(BUILD_ARGS) --manifest ${IMG} -f Dockerfile . - podman manifest push --all ${IMG} ${IMG} - ##@ Deployment +ifndef ignore-not-found + ignore-not-found = false +endif + .PHONY: install install: kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. $(KUSTOMIZE) build config/crd | kubectl apply -f - .PHONY: uninstall uninstall: kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. - $(KUSTOMIZE) build config/crd | kubectl delete -f - + $(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f - .PHONY: deploy deploy: kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} - cd config/default && $(KUSTOMIZE) edit set namespace ${NAMESPACE} $(KUSTOMIZE) build config/default | kubectl apply -f - .PHONY: undeploy undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. 
- cd config/default && $(KUSTOMIZE) edit set namespace ${NAMESPACE} - $(KUSTOMIZE) build config/default | kubectl delete -f - + $(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f - + +## Location for locally installed tools +LOCALBIN ?= $(shell pwd)/bin OS := $(shell uname -s | tr '[:upper:]' '[:lower:]') ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/') @@ -138,7 +139,7 @@ ifeq (,$(shell which kustomize 2>/dev/null)) @{ \ set -e ;\ mkdir -p $(dir $(KUSTOMIZE)) ;\ - curl -sSLo - https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v5.3.0/kustomize_v5.3.0_$(OS)_$(ARCH).tar.gz | \ + curl -sSLo - https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v5.6.0/kustomize_v5.6.0_$(OS)_$(ARCH).tar.gz | \ tar xzf - -C bin/ ;\ } else @@ -146,22 +147,6 @@ KUSTOMIZE = $(shell which kustomize) endif endif -.PHONY: operator-sdk -OPERATOR_SDK = $(shell pwd)/bin/operator-sdk -operator-sdk: ## Download operator-sdk locally if necessary, preferring the $(pwd)/bin path over global if both exist. -ifeq (,$(wildcard $(OPERATOR_SDK))) -ifeq (,$(shell which operator-sdk 2>/dev/null)) - @{ \ - set -e ;\ - mkdir -p $(dir $(OPERATOR_SDK)) ;\ - curl -sSLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/v1.40.0/operator-sdk_$(OS)_$(ARCH) ;\ - chmod +x $(OPERATOR_SDK) ;\ - } -else -OPERATOR_SDK = $(shell which operator-sdk) -endif -endif - .PHONY: ansible-operator ANSIBLE_OPERATOR = $(shell pwd)/bin/ansible-operator ansible-operator: ## Download ansible-operator locally if necessary, preferring the $(pwd)/bin path over global if both exist. 
@@ -170,7 +155,7 @@ ifeq (,$(shell which ansible-operator 2>/dev/null)) @{ \ set -e ;\ mkdir -p $(dir $(ANSIBLE_OPERATOR)) ;\ - curl -sSLo $(ANSIBLE_OPERATOR) https://github.com/operator-framework/operator-sdk/releases/download/v1.40.0/ansible-operator_$(OS)_$(ARCH) ;\ + curl -sSLo $(ANSIBLE_OPERATOR) https://github.com/operator-framework/ansible-operator-plugins/releases/download/$(OPERATOR_SDK_VERSION)/ansible-operator_$(OS)_$(ARCH) ;\ chmod +x $(ANSIBLE_OPERATOR) ;\ } else @@ -178,23 +163,40 @@ ANSIBLE_OPERATOR = $(shell which ansible-operator) endif endif +.PHONY: operator-sdk +OPERATOR_SDK ?= $(LOCALBIN)/operator-sdk +operator-sdk: ## Download operator-sdk locally if necessary. +ifeq (,$(wildcard $(OPERATOR_SDK))) +ifeq (, $(shell which operator-sdk 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPERATOR_SDK)) ;\ + curl -sSLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk_$(OS)_$(ARCH) ;\ + chmod +x $(OPERATOR_SDK) ;\ + } +else +OPERATOR_SDK = $(shell which operator-sdk) +endif +endif + + .PHONY: bundle bundle: kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files. $(OPERATOR_SDK) generate kustomize manifests -q cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) - $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) + $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS) $(OPERATOR_SDK) bundle validate ./bundle .PHONY: bundle-build bundle-build: ## Build the bundle image. - $(ENGINE) build -f bundle.Dockerfile -t $(BUNDLE_IMG) . + $(CONTAINER_TOOL) build -f bundle.Dockerfile -t $(BUNDLE_IMG) . .PHONY: bundle-push bundle-push: ## Push the bundle image. $(MAKE) docker-push IMG=$(BUNDLE_IMG) .PHONY: opm -OPM = ./bin/opm +OPM = $(LOCALBIN)/opm opm: ## Download opm locally if necessary. 
ifeq (,$(wildcard $(OPM))) ifeq (,$(shell which opm 2>/dev/null)) @@ -226,17 +228,15 @@ endif # https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator .PHONY: catalog-build catalog-build: opm ## Build a catalog image. - $(OPM) index add --container-tool $(ENGINE) --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) + $(OPM) index add --container-tool $(CONTAINER_TOOL) --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) # Push the catalog image. .PHONY: catalog-push catalog-push: ## Push a catalog image. $(MAKE) docker-push IMG=$(CATALOG_IMG) -# Generate operator.yaml with image tag as a release artifact -.PHONY: generate-operator-yaml -generate-operator-yaml: kustomize ## Generate operator.yaml with image tag $(VERSION) - @cd config/manager && $(KUSTOMIZE) edit set image controller=quay.io/ansible/eda-server-operator:${VERSION} - @$(KUSTOMIZE) build config/default > ./operator.yaml - - @echo "Generated operator.yaml with image tag $(VERSION)" +##@ Includes +# Operator-specific targets and variables +-include makefiles/operator.mk +# Shared dev workflow targets (synced across all operator repos) +-include makefiles/common.mk diff --git a/README.md b/README.md index 68459370..e5392ca3 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ Table of Contents ## Contributing -Please visit [our contributing guide](./CONTRIBUTING.md) which has details about how to set up your development environment. +Please visit our [contributing guidelines](./CONTRIBUTING.md) and [development guide](./docs/development.md) for information on how to set up your environment, build and deploy the operator, and submit changes. 
### Prerequisites diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 63cdfbfa..ec6ea5c9 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -1,5 +1,5 @@ # Adds namespace to all resources. -namespace: eda-server-operator-system +namespace: eda # Value of this field is prepended to the # names of all resources, e.g. a deployment named diff --git a/dev/eda-cr/eda-openshift-cr.yml b/dev/eda-cr/eda-openshift-cr.yml index 5fce5ad1..be78079b 100644 --- a/dev/eda-cr/eda-openshift-cr.yml +++ b/dev/eda-cr/eda-openshift-cr.yml @@ -3,7 +3,7 @@ kind: EDA metadata: name: eda spec: - automation_server_url: https://awx-awx.apps.aap-dev.ocp4.testing.ansible.com + automation_server_url: https://awx.example.com automation_server_ssl_verify: 'no' service_type: ClusterIP ingress_type: Route @@ -34,7 +34,7 @@ spec: value: "Always" # CA Bundle - bundle_cacert_secret: my-custom-certs + # bundle_cacert_secret: my-custom-certs # -- Resource Requirements api: diff --git a/docs/development.md b/docs/development.md index 21811cce..3d5595b2 100644 --- a/docs/development.md +++ b/docs/development.md @@ -1,63 +1,171 @@ # Development Guide -There are development scripts and yaml examples in the [`dev/`](../dev) directory that, along with the up.sh and down.sh scripts in the root of the repo, can be used to build, deploy and test changes made to the eda-server-operator. +There are development yaml examples in the [`dev/`](../dev) directory and Makefile targets that can be used to build, deploy and test changes made to the eda-server-operator. +Run `make help` to see all available targets and options. 
-## Build and Deploy +## Prerequisites -If you clone the repo, and make sure you are logged in at the CLI with oc and your cluster, you can run: +You will need to have the following tools installed: -``` -export QUAY_USER=username -export NAMESPACE=eda -export TAG=test -./up.sh -``` +* [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) +* [podman](https://podman.io/docs/installation) or [docker](https://docs.docker.com/get-docker/) +* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) +* [oc](https://docs.openshift.com/container-platform/4.11/cli_reference/openshift_cli/getting-started-cli.html) (if using OpenShift) -You can add those variables to your .bashrc file so that you can just run `./up.sh` in the future. +You will also need a container registry account. This guide uses [quay.io](https://quay.io), but any container registry will work. -> Note: the first time you run this, it will create quay.io repos on your fork. You will need to either make those public, or create a global pull secret on your Openshift cluster. +If you don't already have a k8s cluster, you can use minikube to start a lightweight cluster locally by following the [minikube test cluster docs](minikube-test-cluster.md). -To get the URL, if on **Openshift**, run: +## Registry Setup + +1. Go to [quay.io](https://quay.io) and create a repository named `eda-server-operator` under your username. +2. Login at the CLI: +```sh +podman login quay.io ``` -$ oc get route -``` -On **k8s with ingress**, run: +> **Note**: The first time you run `make up`, it will create quay.io repos on your fork. You will need to either make those public or create a global pull secret on your cluster. + + +## Build and Deploy + +EDA requires a running AWX instance. 
Make sure you are logged into your cluster (`oc login` or `kubectl` configured), then run: + +```sh +# Discover the AWX URL from your cluster (AWX_NAMESPACE defaults to 'awx') +make awx-url AWX_NAMESPACE=awx +# Deploy EDA with the AWX URL +AUTOMATION_SERVER_URL=https://your-awx-route QUAY_USER=username make up ``` -$ kubectl get ing + +This will: +1. Login to container registries +2. Create the target namespace +3. Build the operator image and push it to your registry +4. Deploy the operator via kustomize +5. Apply dev secrets and create a dev EDA instance configured to connect to AWX + +### Customization Options + +| Variable | Default | Description | +|----------|---------|-------------| +| `QUAY_USER` | _(required)_ | Your quay.io username | +| `AUTOMATION_SERVER_URL` | _(required)_ | AWX URL for EDA to connect to (use `make awx-url` to discover) | +| `AWX_NAMESPACE` | `awx` | Namespace where AWX is running (used by `make awx-url`) | +| `NAMESPACE` | `eda` | Target namespace | +| `DEV_TAG` | `dev` | Image tag for dev builds | +| `CONTAINER_TOOL` | `podman` | Container engine (`podman` or `docker`) | +| `PLATFORM` | _(auto-detected)_ | Target platform (e.g., `linux/amd64`) | +| `MULTI_ARCH` | `false` | Build multi-arch image (`linux/arm64,linux/amd64`) | +| `DEV_IMG` | `quay.io//eda-server-operator` | Override full image path (skips QUAY_USER) | +| `BUILD_IMAGE` | `true` | Set to `false` to skip image build (use existing image) | +| `CREATE_CR` | `true` | Set to `false` to skip creating the dev EDA instance | +| `CREATE_SECRETS` | `true` | Set to `false` to skip creating dev secrets | +| `IMAGE_PULL_POLICY` | `Always` | Set to `Never` for local builds without push | +| `BUILD_ARGS` | _(empty)_ | Extra args passed to container build (e.g., `--no-cache`) | +| `DEV_CR` | `dev/eda-cr/eda-openshift-cr.yml` | Path to the dev CR to apply | +| `PODMAN_CONNECTION` | _(empty)_ | Remote podman connection name | + +Examples: + +```bash +# Use a specific namespace and tag 
+QUAY_USER=username NAMESPACE=eda DEV_TAG=mytag make up + +# Use docker instead of podman +CONTAINER_TOOL=docker QUAY_USER=username make up + +# Build for a specific platform (e.g., when on ARM building for x86) +PLATFORM=linux/amd64 QUAY_USER=username make up + +# Deploy without building (use an existing image) +BUILD_IMAGE=false DEV_IMG=quay.io/myuser/eda-server-operator:latest make up ``` -On **k8s with nodeport**, run: +### Accessing the Deployment +On **OpenShift**: +```sh +oc get route ``` -$ kubectl get svc + +On **k8s with ingress**: +```sh +kubectl get ing ``` -The URL is then `http://:` +On **k8s with nodeport**: +```sh +kubectl get svc +``` +The URL is then `http://<NodeIP>:<NodePort>`. -> Note: NodePort will only work if you expose that port on your underlying k8s node, or are accessing it from localhost. +> **Note**: NodePort will only work if you expose that port on your underlying k8s node, or are accessing it from localhost. -By default, the usename and password will be admin and password if using the `up.sh` script because it pre-creates a custom admin password k8s secret and specifies it on the EDA custom resource spec. Without that, a password would have been generated and stored in a k8s secret named -admin-password. +### Default Credentials -## Clean up +The dev CR pre-creates an admin password secret. Default credentials are: +- **Username**: `admin` +- **Password**: `password` +Without the dev CR, a password would be generated and stored in a secret named `<resourcename>-admin-password`. 
-Same thing for cleanup, just run ./down.sh and it will clean up your namespace on that cluster +## Clean up +To tear down your development deployment: + +```sh +make down ``` -./down.sh -``` -## Running CI tests locally +### Teardown Options + +| Variable | Default | Description | +|----------|---------|-------------| +| `KEEP_NAMESPACE` | `false` | Set to `true` to keep the namespace for reuse | +| `DELETE_PVCS` | `true` | Set to `false` to preserve PersistentVolumeClaims | +| `DELETE_SECRETS` | `true` | Set to `false` to preserve secrets | +Examples: +```bash +# Keep the namespace for faster redeploy +KEEP_NAMESPACE=true make down + +# Keep PVCs (preserve database data between deploys) +DELETE_PVCS=false make down ``` + + +## Testing + +### Linting + +Run linting checks (required for all PRs): + +```sh make lint ``` -More tests coming soon... + +## Bundle Generation + +If you have the Operator Lifecycle Manager (OLM) installed, you can generate and deploy an operator bundle: + +```bash +# Generate bundle manifests and validate +make bundle + +# Build and push the bundle image +make bundle-build bundle-push + +# Build and push a catalog image +make catalog-build catalog-push +``` + +After pushing the catalog, create a `CatalogSource` in your cluster pointing to the catalog image. Once the CatalogSource is in a READY state, the operator will be available in OperatorHub. 
diff --git a/down.sh b/down.sh deleted file mode 100755 index 2abc99e9..00000000 --- a/down.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# EDA Operator down.sh - -# -- Usage -# NAMESPACE=eda ./down.sh - -# -- Variables -NAMESPACE=${NAMESPACE:-eda} -TAG=${TAG:-dev} -QUAY_USER=${QUAY_USER:-developer} -IMG=quay.io/$QUAY_USER/eda-server-operator:$TAG -EDA_CR=${EDA_CR:-eda} - - -# -- Delete Backups -kubectl delete edabackup --all - -# -- Delete Restores -kubectl delete edarestore --all - -# Delete old operator deployment -kubectl delete deployment eda-server-operator-controller-manager - -# Deploy Operator -make undeploy IMG=$IMG NAMESPACE=$NAMESPACE - -# Remove PVCs -kubectl delete pvc postgres-15-$EDA_CR-postgres-15-0 - diff --git a/makefiles/common.mk b/makefiles/common.mk new file mode 100644 index 00000000..02e9b5d5 --- /dev/null +++ b/makefiles/common.mk @@ -0,0 +1,439 @@ +# common.mk — Shared dev workflow targets for AAP operators +# +# Synced across all operator repos via GHA. +# Operator-specific customization goes in operator.mk. +# +# Usage: +# make up # Full dev deploy +# make down # Full dev undeploy +# +# Required variables (set in operator.mk): +# NAMESPACE — target namespace +# DEPLOYMENT_NAME — operator deployment name +# VERSION — operator version +# +# Optional overrides: +# CONTAINER_TOOL=docker make up # use docker instead of podman (default in Makefile) +# QUAY_USER=myuser make up +# DEV_TAG=mytag make up +# DEV_IMG=registry.example.com/my-operator make up # override image (skips QUAY_USER) +# IMAGE_PULL_POLICY=Never make up # set imagePullPolicy (e.g. 
for local builds) +# PODMAN_CONNECTION=aap-lab make up # use remote podman connection +# KEEP_NAMESPACE=true make down # undeploy but keep namespace +# PLATFORM=linux/amd64 make up # build for specific platform (auto-detected from cluster) +# MULTI_ARCH=true make up # build multi-arch image (PLATFORMS=linux/arm64,linux/amd64) + +# Suppress "Entering/Leaving directory" messages from recursive make calls +MAKEFLAGS += --no-print-directory + +#@ Common Variables + +# Kube CLI auto-detect (oc preferred, kubectl fallback) +KUBECTL ?= $(shell command -v oc 2>/dev/null || command -v kubectl 2>/dev/null) + +# Dev workflow +QUAY_USER ?= +REGISTRIES ?= registry.redhat.io $(if $(QUAY_USER),quay.io/$(QUAY_USER)) +DEV_TAG ?= dev +PULL_SECRET_FILE ?= dev/pull-secret.yml +CREATE_PULL_SECRET ?= true +IMAGE_PULL_POLICY ?= +PODMAN_CONNECTION ?= + +# Dev image: defaults to quay.io//, overridable via DEV_IMG +_OPERATOR_NAME = $(notdir $(IMAGE_TAG_BASE)) +DEV_IMG ?= $(if $(QUAY_USER),quay.io/$(QUAY_USER)/$(_OPERATOR_NAME),$(IMAGE_TAG_BASE)) + +# Build platform (auto-detected from cluster, override with PLATFORM=linux/amd64) +MULTI_ARCH ?= false +PLATFORMS ?= linux/arm64,linux/amd64 + +# Auto-detect registry auth config +REGISTRY_AUTH_CONFIG ?= $(shell \ + if [ "$(CONTAINER_TOOL)" = "podman" ]; then \ + for f in "$${XDG_RUNTIME_DIR}/containers/auth.json" \ + "$${HOME}/.config/containers/auth.json" \ + "$${HOME}/.docker/config.json"; do \ + [ -f "$$f" ] && echo "$$f" && break; \ + done; \ + else \ + [ -f "$${HOME}/.docker/config.json" ] && echo "$${HOME}/.docker/config.json"; \ + fi) + +# Container tool with optional remote connection (podman only) +_CONTAINER_CMD = $(CONTAINER_TOOL)$(if $(and $(filter podman,$(CONTAINER_TOOL)),$(PODMAN_CONNECTION)), --connection $(PODMAN_CONNECTION),) + +# Portable sed -i (GNU vs BSD) +_SED_I = $(shell if sed --version >/dev/null 2>&1; then echo 'sed -i'; else echo 'sed -i ""'; fi) + +# Custom configs to apply during post-deploy (secrets, configmaps, 
etc.) +DEV_CUSTOM_CONFIG ?= + +# Dev CR to apply after deployment (set in operator.mk) +DEV_CR ?= +CREATE_CR ?= true + +# Teardown configuration (set in operator.mk) +TEARDOWN_CR_KINDS ?= +TEARDOWN_BACKUP_KINDS ?= +TEARDOWN_RESTORE_KINDS ?= +OLM_SUBSCRIPTIONS ?= +DELETE_PVCS ?= true +DELETE_SECRETS ?= true +KEEP_NAMESPACE ?= false + +##@ Dev Workflow + +.PHONY: up +up: _require-img _require-namespace ## Full dev deploy + @$(MAKE) registry-login + @$(MAKE) ns-wait + @$(MAKE) ns-create + @$(MAKE) ns-security + @$(MAKE) pull-secret + @$(MAKE) patch-pull-policy + @$(MAKE) operator-up + +.PHONY: down +down: _require-namespace ## Full dev undeploy + @echo "=== Tearing down dev environment ===" + @$(MAKE) _teardown-restores + @$(MAKE) _teardown-backups + @$(MAKE) _teardown-operands + @$(MAKE) _teardown-pvcs + @$(MAKE) _teardown-secrets + @$(MAKE) _teardown-olm + @$(MAKE) _teardown-namespace + +#@ Operator Deploy Building Blocks +# +# Composable targets for operator-up. Each operator.mk wires these +# together in its own operator-up target, adding repo-specific steps. +# +# Kustomize repos: +# operator-up: _operator-build-and-push _operator-deploy _operator-wait-ready _operator-post-deploy +# +# OLM repos (gateway): +# operator-up: _olm-cleanup _olm-deploy _operator-build-and-inject _operator-wait-ready _operator-post-deploy + +.PHONY: _operator-build-and-push +_operator-build-and-push: + @if [ "$(BUILD_IMAGE)" != "true" ]; then \ + echo "Skipping image build (BUILD_IMAGE=false)"; \ + exit 0; \ + fi; \ + $(MAKE) dev-build; \ + echo "Pushing $(DEV_IMG):$(DEV_TAG)..."; \ + $(_CONTAINER_CMD) push $(DEV_IMG):$(DEV_TAG) + +.PHONY: _operator-deploy +_operator-deploy: + @$(MAKE) pre-deploy-cleanup + @cd config/default && $(KUSTOMIZE) edit set namespace $(NAMESPACE) + @$(MAKE) deploy IMG=$(DEV_IMG):$(DEV_TAG) + +.PHONY: _operator-wait-ready +_operator-wait-ready: + @echo "Waiting for operator pods to be ready..." 
+ @ATTEMPTS=0; \ + while [ $$ATTEMPTS -lt 30 ]; do \ + READY=$$($(KUBECTL) get deployment $(DEPLOYMENT_NAME) -n $(NAMESPACE) \ + -o jsonpath='{.status.readyReplicas}' 2>/dev/null); \ + DESIRED=$$($(KUBECTL) get deployment $(DEPLOYMENT_NAME) -n $(NAMESPACE) \ + -o jsonpath='{.status.replicas}' 2>/dev/null); \ + if [ -n "$$READY" ] && [ -n "$$DESIRED" ] && [ "$$READY" = "$$DESIRED" ] && [ "$$READY" -gt 0 ]; then \ + echo "All pods ready ($$READY/$$DESIRED)."; \ + break; \ + fi; \ + echo "Pods not ready ($$READY/$$DESIRED). Waiting..."; \ + ATTEMPTS=$$((ATTEMPTS + 1)); \ + sleep 10; \ + done; \ + if [ $$ATTEMPTS -ge 30 ]; then \ + echo "ERROR: Timed out waiting for operator pods to be ready (5 minutes)." >&2; \ + exit 1; \ + fi + @$(KUBECTL) config set-context --current --namespace=$(NAMESPACE) + +.PHONY: _operator-post-deploy +_operator-post-deploy: + @# Apply dev custom configs (secrets, configmaps, etc.) from DEV_CUSTOM_CONFIG + @$(MAKE) _apply-custom-config + @if [ "$(CREATE_CR)" = "true" ] && [ -f "$(DEV_CR)" ]; then \ + echo "Applying dev CR: $(DEV_CR)"; \ + $(KUBECTL) apply -n $(NAMESPACE) -f $(DEV_CR); \ + fi + +#@ Teardown + +.PHONY: _teardown-restores +_teardown-restores: + @for kind in $(TEARDOWN_RESTORE_KINDS); do \ + echo "Deleting $$kind resources..."; \ + $(KUBECTL) delete $$kind -n $(NAMESPACE) --all --wait=true --ignore-not-found=true || true; \ + done + +.PHONY: _teardown-backups +_teardown-backups: + @for kind in $(TEARDOWN_BACKUP_KINDS); do \ + echo "Deleting $$kind resources..."; \ + $(KUBECTL) delete $$kind -n $(NAMESPACE) --all --wait=true --ignore-not-found=true || true; \ + done + +.PHONY: _teardown-operands +_teardown-operands: + @for kind in $(TEARDOWN_CR_KINDS); do \ + echo "Deleting $$kind resources..."; \ + $(KUBECTL) delete $$kind -n $(NAMESPACE) --all --wait=true --ignore-not-found=true || true; \ + done + +.PHONY: _teardown-pvcs +_teardown-pvcs: + @if [ "$(DELETE_PVCS)" = "true" ]; then \ + echo "Deleting PVCs..."; \ + $(KUBECTL) 
delete pvc -n $(NAMESPACE) --all --ignore-not-found=true; \ + else \ + echo "Keeping PVCs (DELETE_PVCS=false)"; \ + fi + +.PHONY: _teardown-secrets +_teardown-secrets: + @if [ "$(DELETE_SECRETS)" = "true" ]; then \ + echo "Deleting secrets..."; \ + $(KUBECTL) delete secrets -n $(NAMESPACE) --all --ignore-not-found=true; \ + else \ + echo "Keeping secrets (DELETE_SECRETS=false)"; \ + fi + +.PHONY: _teardown-olm +_teardown-olm: + @for sub in $(OLM_SUBSCRIPTIONS); do \ + echo "Deleting subscription $$sub..."; \ + $(KUBECTL) delete subscription $$sub -n $(NAMESPACE) --ignore-not-found=true || true; \ + done + @CSV=$$($(KUBECTL) get csv -n $(NAMESPACE) --no-headers -o custom-columns=":metadata.name" 2>/dev/null | grep aap-operator || true); \ + if [ -n "$$CSV" ]; then \ + echo "Deleting CSV: $$CSV"; \ + $(KUBECTL) delete csv $$CSV -n $(NAMESPACE) --ignore-not-found=true; \ + fi + +.PHONY: _teardown-namespace +_teardown-namespace: + @if [ "$(KEEP_NAMESPACE)" != "true" ]; then \ + echo "Deleting namespace $(NAMESPACE)..."; \ + $(KUBECTL) delete namespace $(NAMESPACE) --ignore-not-found=true; \ + else \ + echo "Keeping namespace $(NAMESPACE) (KEEP_NAMESPACE=true)"; \ + fi + +##@ Registry + +.PHONY: registry-login +registry-login: ## Login to container registries + @for registry in $(REGISTRIES); do \ + echo "Logging into $$registry..."; \ + $(_CONTAINER_CMD) login $$registry; \ + done + +##@ Namespace + +.PHONY: ns-wait +ns-wait: ## Wait for namespace to finish terminating + @if $(KUBECTL) get namespace $(NAMESPACE) 2>/dev/null | grep -q 'Terminating'; then \ + echo "Namespace $(NAMESPACE) is terminating. Waiting..."; \ + while $(KUBECTL) get namespace $(NAMESPACE) 2>/dev/null | grep -q 'Terminating'; do \ + sleep 5; \ + done; \ + echo "Namespace $(NAMESPACE) terminated."; \ + fi + +.PHONY: ns-create +ns-create: ## Create namespace if it does not exist + @if ! 
$(KUBECTL) get namespace $(NAMESPACE) --no-headers 2>/dev/null | grep -q .; then \ + echo "Creating namespace $(NAMESPACE)"; \ + $(KUBECTL) create namespace $(NAMESPACE); \ + else \ + echo "Namespace $(NAMESPACE) already exists"; \ + fi + +.PHONY: ns-security +ns-security: ## Configure namespace security for OLM bundle unpacking + @if ! oc get scc anyuid >/dev/null 2>&1; then \ + echo "No SCC support detected (vanilla Kubernetes), applying pod security labels..."; \ + $(KUBECTL) label namespace "$(NAMESPACE)" \ + pod-security.kubernetes.io/enforce=privileged \ + pod-security.kubernetes.io/audit=privileged \ + pod-security.kubernetes.io/warn=privileged --overwrite; \ + elif $(KUBECTL) get namespace openshift-apiserver >/dev/null 2>&1; then \ + echo "Full OpenShift detected — skipping SCC grants (OLM handles bundle unpacking)"; \ + else \ + echo "MicroShift detected — granting SCCs for bundle unpack pods in $(NAMESPACE)..."; \ + oc adm policy add-scc-to-user privileged -z default -n "$(NAMESPACE)" 2>/dev/null || true; \ + oc adm policy add-scc-to-user anyuid -z default -n "$(NAMESPACE)" 2>/dev/null || true; \ + fi + +##@ Secrets + +.PHONY: pull-secret +pull-secret: ## Apply pull secret from file or create from auth config + @if [ "$(CREATE_PULL_SECRET)" != "true" ]; then \ + echo "Pull secret creation disabled (CREATE_PULL_SECRET=false)"; \ + exit 0; \ + fi; \ + if [ -f "$(PULL_SECRET_FILE)" ]; then \ + echo "Applying pull secret from $(PULL_SECRET_FILE)"; \ + $(KUBECTL) apply -n $(NAMESPACE) -f $(PULL_SECRET_FILE); \ + elif [ -n "$(REGISTRY_AUTH_CONFIG)" ] && [ -f "$(REGISTRY_AUTH_CONFIG)" ]; then \ + if ! 
$(KUBECTL) get secret redhat-operators-pull-secret -n $(NAMESPACE) 2>/dev/null | grep -q .; then \ + echo "Creating pull secret from $(REGISTRY_AUTH_CONFIG)"; \ + $(KUBECTL) create secret generic redhat-operators-pull-secret \ + --from-file=.dockerconfigjson="$(REGISTRY_AUTH_CONFIG)" \ + --type=kubernetes.io/dockerconfigjson \ + -n $(NAMESPACE); \ + else \ + echo "Pull secret already exists"; \ + fi; \ + else \ + echo "No pull secret file or registry auth config found, skipping"; \ + exit 0; \ + fi; \ + echo "Linking pull secret to default service account..."; \ + $(KUBECTL) patch serviceaccount default -n $(NAMESPACE) \ + -p '{"imagePullSecrets": [{"name": "redhat-operators-pull-secret"}]}' 2>/dev/null \ + && echo "Pull secret linked to default SA" \ + || echo "Warning: could not link pull secret to default SA" + +##@ Build + +.PHONY: podman-build +podman-build: ## Build image with podman + $(_CONTAINER_CMD) build $(BUILD_ARGS) -t ${IMG} . + +.PHONY: podman-push +podman-push: ## Push image with podman + $(_CONTAINER_CMD) push ${IMG} + +.PHONY: podman-buildx +podman-buildx: ## Build multi-arch image with podman + $(_CONTAINER_CMD) build $(BUILD_ARGS) --platform=$(PLATFORMS) --manifest ${IMG} -f Dockerfile . + +.PHONY: podman-buildx-push +podman-buildx-push: podman-buildx ## Build and push multi-arch image with podman + $(_CONTAINER_CMD) manifest push --all ${IMG} + +.PHONY: dev-build +dev-build: ## Build dev image (auto-detects arch of connected cluster, cross-compiles if needed) + @HOST_ARCH=$$(uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/'); \ + CLUSTER_ARCH=$$($(KUBECTL) get nodes -o jsonpath='{.items[0].status.nodeInfo.architecture}' 2>/dev/null); \ + if [ -z "$$CLUSTER_ARCH" ]; then \ + echo "WARNING: Could not detect cluster architecture. 
Is the cluster reachable?"; \ + echo " Falling back to host architecture ($$HOST_ARCH)"; \ + CLUSTER_ARCH="$$HOST_ARCH"; \ + fi; \ + echo "Building $(DEV_IMG):$(DEV_TAG) with $(CONTAINER_TOOL)..."; \ + echo " Host arch: $$HOST_ARCH"; \ + echo " Cluster arch: $$CLUSTER_ARCH"; \ + if [ "$(MULTI_ARCH)" = "true" ]; then \ + echo " Build mode: multi-arch ($(PLATFORMS))"; \ + $(MAKE) $(CONTAINER_TOOL)-buildx IMG=$(DEV_IMG):$(DEV_TAG) PLATFORMS=$(PLATFORMS); \ + elif [ -n "$(PLATFORM)" ]; then \ + echo " Build mode: cross-arch ($(PLATFORM))"; \ + $(MAKE) $(CONTAINER_TOOL)-buildx IMG=$(DEV_IMG):$(DEV_TAG) PLATFORMS=$(PLATFORM); \ + elif [ "$$HOST_ARCH" != "$$CLUSTER_ARCH" ]; then \ + echo " Build mode: cross-arch (linux/$$CLUSTER_ARCH)"; \ + $(MAKE) $(CONTAINER_TOOL)-buildx-push IMG=$(DEV_IMG):$(DEV_TAG) PLATFORMS=linux/$$CLUSTER_ARCH; \ + else \ + echo " Build mode: local ($$HOST_ARCH)"; \ + $(MAKE) $(CONTAINER_TOOL)-build IMG=$(DEV_IMG):$(DEV_TAG); \ + if [ "$(IMAGE_PULL_POLICY)" != "Never" ]; then \ + echo "WARNING: Local build without push. 
Set IMAGE_PULL_POLICY=Never or the kubelet"; \ + echo " will attempt to pull $(DEV_IMG):$(DEV_TAG) from a registry and fail."; \ + fi; \ + fi + +##@ Deployment Helpers + +.PHONY: patch-pull-policy +patch-pull-policy: ## Patch imagePullPolicy in manager config (default: Always, override with IMAGE_PULL_POLICY) + @_POLICY="$(if $(IMAGE_PULL_POLICY),$(IMAGE_PULL_POLICY),Always)"; \ + for file in config/manager/manager.yaml; do \ + if [ -f "$$file" ] && grep -q 'imagePullPolicy: IfNotPresent' "$$file"; then \ + echo "Patching imagePullPolicy to $$_POLICY in $$file"; \ + $(_SED_I) "s|imagePullPolicy: IfNotPresent|imagePullPolicy: $$_POLICY|g" "$$file"; \ + fi; \ + done + +.PHONY: pre-deploy-cleanup +pre-deploy-cleanup: ## Delete existing operator deployment (safe) + @if [ -n "$(DEPLOYMENT_NAME)" ]; then \ + echo "Cleaning up deployment $(DEPLOYMENT_NAME)..."; \ + $(KUBECTL) delete deployment $(DEPLOYMENT_NAME) \ + -n $(NAMESPACE) --ignore-not-found=true; \ + fi + +.PHONY: _apply-custom-config +_apply-custom-config: ## Apply custom configs (secrets, configmaps, etc.) 
+ @for f in $(DEV_CUSTOM_CONFIG); do \ + if [ -f "$$f" ]; then \ + echo "Applying custom config: $$f"; \ + $(KUBECTL) apply -n $(NAMESPACE) -f $$f; \ + else \ + echo "WARNING: Custom config not found: $$f"; \ + fi; \ + done + +#@ Validation + +.PHONY: _require-img +_require-img: + @if [ -z "$(DEV_IMG)" ]; then \ + echo "Error: Set QUAY_USER or DEV_IMG."; \ + echo " export QUAY_USER="; \ + echo " or: DEV_IMG=registry.example.com/my-operator make up"; \ + exit 1; \ + fi + @if echo "$(DEV_IMG)" | grep -q '^registry\.redhat\.io'; then \ + echo "Error: Cannot push to registry.redhat.io (production registry)."; \ + echo " Set QUAY_USER or DEV_IMG to use a personal registry."; \ + exit 1; \ + fi + @if echo "$(DEV_IMG)" | grep -q '^quay\.io/'; then \ + if [ -z "$(QUAY_USER)" ]; then \ + echo "Error: Cannot push to quay.io without QUAY_USER."; \ + echo " export QUAY_USER="; \ + echo " or: DEV_IMG=/ make up"; \ + exit 1; \ + fi; \ + if ! echo "$(DEV_IMG)" | grep -q '^quay\.io/$(QUAY_USER)/'; then \ + echo "Error: DEV_IMG ($(DEV_IMG)) does not match QUAY_USER ($(QUAY_USER))."; \ + echo " Expected: quay.io/$(QUAY_USER)/"; \ + echo " Either fix QUAY_USER or set DEV_IMG explicitly."; \ + exit 1; \ + fi; \ + fi + +.PHONY: _require-namespace +_require-namespace: + @if [ -z "$(NAMESPACE)" ]; then \ + echo "Error: NAMESPACE is required. Set it in operator.mk or run: export NAMESPACE="; \ + exit 1; \ + fi + +##@ Linting + +LINT_PATHS ?= roles/ playbooks/ config/samples/ config/manager/ + +.PHONY: lint +lint: ## Run ansible-lint and check no_log usage + @echo "Checking if ansible-lint is installed..." + @which ansible-lint > /dev/null || (echo "ansible-lint not found, installing..." && pip install --user ansible-lint) + @echo "Running ansible-lint..." 
+ @ansible-lint $(LINT_PATHS) + @if [ -d "roles/" ]; then \ + echo "Checking for no_log instances that need to use the variable..."; \ + if grep -nr ' no_log:' roles | grep -qv '"{{ no_log }}"'; then \ + echo 'Please update the following no_log statement(s) with the "{{ no_log }}" value'; \ + grep -nr ' no_log:' roles | grep -v '"{{ no_log }}"'; \ + exit 1; \ + fi; \ + fi diff --git a/makefiles/operator.mk b/makefiles/operator.mk new file mode 100644 index 00000000..9d19e2ab --- /dev/null +++ b/makefiles/operator.mk @@ -0,0 +1,64 @@ +# operator.mk — EDA Server Operator specific targets and variables +# +# This file is NOT synced across repos. Each operator maintains its own. + +#@ Operator Variables + +VERSION ?= $(shell git describe --tags 2>/dev/null || echo 0.0.1) +IMAGE_TAG_BASE ?= quay.io/ansible/eda-server-operator +NAMESPACE ?= eda +DEPLOYMENT_NAME ?= eda-server-operator-controller-manager + +# Dev CR applied by _eda-apply-cr with URL substitution (not by common DEV_CR mechanism) +_EDA_DEV_CR ?= dev/eda-cr/eda-openshift-cr.yml + +# AWX connection (required for EDA) +AUTOMATION_SERVER_URL ?= +AWX_NAMESPACE ?= awx + +# Custom configs to apply during post-deploy (secrets, configmaps, etc.) +DEV_CUSTOM_CONFIG ?= dev/secrets/admin-password-secret.yml dev/secrets/custom-pg-secret.yml dev/secrets/custom-db-fields-encryption-secret.yml + +# Feature flags +BUILD_IMAGE ?= true +CREATE_CR ?= true + +# Teardown configuration +TEARDOWN_CR_KINDS ?= eda +TEARDOWN_BACKUP_KINDS ?= edabackup +TEARDOWN_RESTORE_KINDS ?= edarestore +OLM_SUBSCRIPTIONS ?= + +##@ EDA Server Operator + +.PHONY: operator-up +operator-up: _operator-build-and-push _operator-deploy _operator-wait-ready _operator-post-deploy _eda-apply-cr ## EDA-specific deploy + @: + +.PHONY: _eda-apply-cr +_eda-apply-cr: + @if [ "$(CREATE_CR)" != "true" ] || [ ! -f "$(_EDA_DEV_CR)" ]; then exit 0; fi + @if [ -z "$(AUTOMATION_SERVER_URL)" ]; then \ + echo "ERROR: AUTOMATION_SERVER_URL is required. 
Set it or run 'make awx-url AWX_NAMESPACE=' to discover it." >&2; \ + exit 1; \ + fi + @echo "Applying dev CR: $(_EDA_DEV_CR) (automation_server_url=$(AUTOMATION_SERVER_URL))" + @sed 's|https://awx.example.com|$(AUTOMATION_SERVER_URL)|g' $(_EDA_DEV_CR) | $(KUBECTL) apply -n $(NAMESPACE) -f - + +.PHONY: awx-url +awx-url: ## Discover AWX route URL (use AWX_NAMESPACE to set namespace, default: awx) + @URL=$$($(KUBECTL) get route -n $(AWX_NAMESPACE) -l app.kubernetes.io/managed-by=awx-operator \ + -o jsonpath='https://{.items[0].spec.host}' 2>/dev/null); \ + if [ -z "$$URL" ] || [ "$$URL" = "https://" ]; then \ + echo "ERROR: No AWX route found in namespace $(AWX_NAMESPACE)" >&2; \ + exit 1; \ + fi; \ + echo "$$URL" + +##@ Release + +.PHONY: generate-operator-yaml +generate-operator-yaml: kustomize ## Generate operator.yaml with image tag $(VERSION) + @cd config/manager && $(KUSTOMIZE) edit set image controller=quay.io/ansible/eda-server-operator:${VERSION} + @$(KUSTOMIZE) build config/default > ./operator.yaml + @echo "Generated operator.yaml with image tag $(VERSION)" diff --git a/up.sh b/up.sh deleted file mode 100755 index 15f356c0..00000000 --- a/up.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/bash -# EDA Operator up.sh - -# -- Usage -# NAMESPACE=eda TAG=dev QUAY_USER=developer ./up.sh - -# -- User Variables -NAMESPACE=${NAMESPACE:-eda} -QUAY_USER=${QUAY_USER:-developer} -TAG=${TAG:-$(git rev-parse --short HEAD)} -DEV_TAG=${DEV_TAG:-dev} -DEV_TAG_PUSH=${DEV_TAG_PUSH:-true} -DEV_CR=${DEV_CR:-dev/eda-cr/eda-openshift-cr.yml} - -# -- Container Build Engine (podman or docker) -ENGINE=${ENGINE:-podman} - -# -- Variables -IMG=quay.io/$QUAY_USER/eda-server-operator -KUBE_APPLY="kubectl apply -n $NAMESPACE -f" - -# -- Wait for existing project to be deleted -# Function to check if the namespace is in terminating state -is_namespace_terminating() { - oc get namespace $NAMESPACE 2>/dev/null | grep -q 'Terminating' - return $? 
-} - -# Check if the namespace exists and is in terminating state -if kubectl get namespace $NAMESPACE 2>/dev/null; then - echo "Namespace $NAMESPACE exists." - - if is_namespace_terminating; then - echo "Namespace $NAMESPACE is in terminating state. Waiting for it to be fully terminated..." - while is_namespace_terminating; do - sleep 5 - done - echo "Namespace $NAMESPACE has been terminated." - fi -fi - - -# -- Create namespace -kubectl create namespace $NAMESPACE - - -# -- Prepare - -# Set imagePullPolicy to Always -files=( - config/manager/manager.yaml -) -for file in "${files[@]}"; do - if grep -qF 'imagePullPolicy: IfNotPresent' ${file}; then - sed -i -e "s|imagePullPolicy: IfNotPresent|imagePullPolicy: Always|g" ${file}; - fi -done - - -# Delete old operator deployment -oc delete deployment eda-server-operator-controller-manager - -# Create secrets -$KUBE_APPLY dev/secrets/custom-pg-secret.yml -$KUBE_APPLY dev/secrets/custom-db-fields-encryption-secret.yml -$KUBE_APPLY dev/secrets/admin-password-secret.yml - - -# Create Secrets for testing bundle_cacert_secret -kubectl create -n $NAMESPACE secret generic my-custom-certs --from-file=bundle-ca.crt=/etc/pki/tls/cert.pem - - -# -- Login to Quay.io -$ENGINE login quay.io - -if [ $ENGINE = 'podman' ]; then - if [ -f "$XDG_RUNTIME_DIR/containers/auth.json" ] ; then - REGISTRY_AUTH_CONFIG=$XDG_RUNTIME_DIR/containers/auth.json - echo "Found registry auth config: $REGISTRY_AUTH_CONFIG" - elif [ -f $HOME/.config/containers/auth.json ] ; then - REGISTRY_AUTH_CONFIG=$HOME/.config/containers/auth.json - echo "Found registry auth config: $REGISTRY_AUTH_CONFIG" - elif [ -f "/home/$USER/.docker/config.json" ] ; then - REGISTRY_AUTH_CONFIG=/home/$USER/.docker/config.json - echo "Found registry auth config: $REGISTRY_AUTH_CONFIG" - else - echo "No Podman configuration files were found." 
- fi -fi - -if [ $ENGINE = 'docker' ]; then - if [ -f "/home/$USER/.docker/config.json" ] ; then - REGISTRY_AUTH_CONFIG=/home/$USER/.docker/config.json - echo "Found registry auth config: $REGISTRY_AUTH_CONFIG" - else - echo "No Docker configuration files were found." - fi -fi - - -# -- Build & Push Operator Image -echo "Preparing to build $IMG:$TAG ($IMG:$DEV_TAG) with $ENGINE..." -sleep 3 -HOST_ARCH=$(uname -m) -if [[ "$HOST_ARCH" == "aarch64" || "$HOST_ARCH" == "arm64" ]] && [ "$ENGINE" = "podman" ]; then - echo "ARM architecture detected ($HOST_ARCH). Using multi-arch build..." - make podman-buildx IMG=$IMG:$TAG ENGINE=$ENGINE -else - make docker-build docker-push IMG=$IMG:$TAG -fi - -# Tag and Push DEV_TAG Image when DEV_TAG_PUSH is 'True' -if $DEV_TAG_PUSH ; then - $ENGINE tag $IMG:$TAG $IMG:$DEV_TAG - make docker-push IMG=$IMG:$DEV_TAG -fi - -# -- Deploy Operator -make deploy IMG=$IMG:$TAG NAMESPACE=$NAMESPACE - - -# -- Create CR - -set -euxo pipefail && $KUBE_APPLY $DEV_CR - -# uncomment the CR you want to use -# $KUBE_APPLY dev/eda-cr/eda-openshift-cr.yml -# $KUBE_APPLY dev/eda-cr/eda-k8s-ing.yml -# $KUBE_APPLY dev/eda-cr/eda-k8s-nodeport-cr.yml -# $KUBE_APPLY dev/eda-cr/eda-resource-quota-cr.yml -# $KUBE_APPLY dev/eda-cr/lightweight-eda.yml From 597ddadc6c2a75d7db3f81dadec89d51b626e557 Mon Sep 17 00:00:00 2001 From: Lucas Benedito Date: Mon, 13 Apr 2026 12:51:34 +0100 Subject: [PATCH 2/2] Address PR review feedback for Makefile standardization - Rename _EDA_DEV_CR to DEV_CR in operator.mk for consistency with awx-operator and galaxy-operator - Add ingress fallback to awx-url target for k8s/minikube support - Fix DEV_IMG docs example to avoid double-tag issue Assisted-by: Claude Signed-off-by: Lucas Benedito --- docs/development.md | 2 +- makefiles/operator.mk | 17 ++++++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/docs/development.md b/docs/development.md index 3d5595b2..58d9cd0e 100644 --- a/docs/development.md +++ 
b/docs/development.md @@ -83,7 +83,7 @@ CONTAINER_TOOL=docker QUAY_USER=username make up PLATFORM=linux/amd64 QUAY_USER=username make up # Deploy without building (use an existing image) -BUILD_IMAGE=false DEV_IMG=quay.io/myuser/eda-server-operator:latest make up +BUILD_IMAGE=false DEV_IMG=quay.io/myuser/eda-server-operator DEV_TAG=latest make up ``` ### Accessing the Deployment diff --git a/makefiles/operator.mk b/makefiles/operator.mk index 9d19e2ab..08182c22 100644 --- a/makefiles/operator.mk +++ b/makefiles/operator.mk @@ -9,8 +9,7 @@ IMAGE_TAG_BASE ?= quay.io/ansible/eda-server-operator NAMESPACE ?= eda DEPLOYMENT_NAME ?= eda-server-operator-controller-manager -# Dev CR applied by _eda-apply-cr with URL substitution (not by common DEV_CR mechanism) -_EDA_DEV_CR ?= dev/eda-cr/eda-openshift-cr.yml +DEV_CR ?= dev/eda-cr/eda-openshift-cr.yml # AWX connection (required for EDA) AUTOMATION_SERVER_URL ?= @@ -37,20 +36,24 @@ operator-up: _operator-build-and-push _operator-deploy _operator-wait-ready _ope .PHONY: _eda-apply-cr _eda-apply-cr: - @if [ "$(CREATE_CR)" != "true" ] || [ ! -f "$(_EDA_DEV_CR)" ]; then exit 0; fi + @if [ "$(CREATE_CR)" != "true" ] || [ ! -f "$(DEV_CR)" ]; then exit 0; fi @if [ -z "$(AUTOMATION_SERVER_URL)" ]; then \ echo "ERROR: AUTOMATION_SERVER_URL is required. Set it or run 'make awx-url AWX_NAMESPACE=' to discover it." 
>&2; \ exit 1; \ fi - @echo "Applying dev CR: $(_EDA_DEV_CR) (automation_server_url=$(AUTOMATION_SERVER_URL))" - @sed 's|https://awx.example.com|$(AUTOMATION_SERVER_URL)|g' $(_EDA_DEV_CR) | $(KUBECTL) apply -n $(NAMESPACE) -f - + @echo "Applying dev CR: $(DEV_CR) (automation_server_url=$(AUTOMATION_SERVER_URL))" + @sed 's|https://awx.example.com|$(AUTOMATION_SERVER_URL)|g' $(DEV_CR) | $(KUBECTL) apply -n $(NAMESPACE) -f - .PHONY: awx-url -awx-url: ## Discover AWX route URL (use AWX_NAMESPACE to set namespace, default: awx) +awx-url: ## Discover AWX URL (route on OCP, ingress on k8s; use AWX_NAMESPACE to set namespace, default: awx) @URL=$$($(KUBECTL) get route -n $(AWX_NAMESPACE) -l app.kubernetes.io/managed-by=awx-operator \ -o jsonpath='https://{.items[0].spec.host}' 2>/dev/null); \ if [ -z "$$URL" ] || [ "$$URL" = "https://" ]; then \ - echo "ERROR: No AWX route found in namespace $(AWX_NAMESPACE)" >&2; \ + URL=$$($(KUBECTL) get ingress -n $(AWX_NAMESPACE) -l app.kubernetes.io/managed-by=awx-operator \ + -o jsonpath='https://{.items[0].spec.rules[0].host}' 2>/dev/null); \ + fi; \ + if [ -z "$$URL" ] || [ "$$URL" = "https://" ]; then \ + echo "ERROR: No AWX route or ingress found in namespace $(AWX_NAMESPACE)" >&2; \ exit 1; \ fi; \ echo "$$URL"