diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index dfd7a5d97..fecd052ee 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -49,6 +49,9 @@ jobs:
box:
- centos/stream9
- centos/stream10
+ iop:
+ - enabled
+ - disabled
exclude:
- certificate_source: installer
box: centos/stream10
@@ -57,17 +60,19 @@
security: fapolicyd
database: internal
box: centos/stream9
+ iop: disabled
- certificate_source: default
security: none
database: external
box: centos/stream9
+ iop: disabled
runs-on: ubuntu-24.04
env:
FOREMANCTL_BASE_BOX: ${{ matrix.box }}
- name: "Tests (certificates: ${{ matrix.certificate_source }}, database: ${{ matrix.database }}, security: ${{ matrix.security }}, box: ${{ matrix.box }})"
+ name: "Tests (certificates: ${{ matrix.certificate_source }}, database: ${{ matrix.database }}, security: ${{ matrix.security }}, box: ${{ matrix.box }}, iop: ${{ matrix.iop }})"
steps:
- name: generate artifact suffix
- run: echo "ARTIFACT_SUFFIX=$(echo '${{ matrix.certificate_source }}-${{ matrix.security }}-${{ matrix.database }}-${{ matrix.box }}' | tr -cd '[:alnum:]-')" >> "${GITHUB_ENV}"
+ run: echo "ARTIFACT_SUFFIX=$(echo '${{ matrix.certificate_source }}-${{ matrix.security }}-${{ matrix.database }}-${{ matrix.box }}-iop${{ matrix.iop }}' | tr -cd '[:alnum:]-')" >> "${GITHUB_ENV}"
- uses: actions/checkout@v6
- name: Set up Python
uses: actions/setup-python@v6
@@ -120,6 +125,10 @@
- name: Add optional features - azure-rm, google and remote-execution
run: |
./foremanctl deploy --add-feature azure-rm --add-feature google --add-feature remote-execution
+ - name: Enable iop
+ if: matrix.iop == 'enabled'
+ run: |
+ ./foremanctl deploy --add-feature iop
- name: Run tests
run: |
./forge test --pytest-args="--certificate-source=${{ matrix.certificate_source }} --database-mode=${{ matrix.database }}"
diff --git a/development/roles/foreman_installer_certs/tasks/main.yml b/development/roles/foreman_installer_certs/tasks/main.yml
index bf4c0f0d9..484ab9c77 100644
--- a/development/roles/foreman_installer_certs/tasks/main.yml
+++ b/development/roles/foreman_installer_certs/tasks/main.yml
@@ -12,5 +12,5 @@
# utilize https://github.com/theforeman/foreman-installer/pull/935
- name: Generate certs
- ansible.builtin.command: foreman-certs --apache true --foreman true --candlepin true
+ ansible.builtin.command: foreman-certs --apache true --foreman true --candlepin true --iop true
changed_when: false
diff --git a/docs/deployment.md b/docs/deployment.md
index 376138a77..263305de2 100644
--- a/docs/deployment.md
+++ b/docs/deployment.md
@@ -42,6 +42,16 @@ These base features control which plugins are enabled when a feature is requeste
A deployment can have multiple base features enabled.
+### Enabling IOP
+
+IOP (Insights Operating Platform) deploys on-premise Insights services for advisor, vulnerability, and remediation. It requires internal database mode and depends on the `rh-cloud` and `katello` features.
+
+```bash
+./foremanctl deploy --add-feature iop
+```
+
+See [IOP Architecture](iop.md) for details on the services deployed and configuration options.
+
### Authenticated Registry Handling
If you need to pull images from private or authenticated container registries, you can configure registry authentication using Podman's auth file.
diff --git a/docs/iop.md b/docs/iop.md
new file mode 100644
index 000000000..6b788d52d
--- /dev/null
+++ b/docs/iop.md
@@ -0,0 +1,139 @@
+# IOP (Insights Operating Platform)
+
+IOP deploys the on-premise Insights services that provide advisor, vulnerability, and remediation capabilities integrated with Foreman.
+
+## Enabling IOP
+
+Add `iop` to `enabled_features` in your flavor configuration. IOP requires internal database mode (`database_mode: internal`).
+
+The `iop` feature depends on `rh-cloud`, which installs the `foreman_rh_cloud` plugin into Foreman and `katello` as a transitive dependency.
+
+## Architecture
+
+IOP runs as a set of containerized services managed via podman quadlets on the `iop-core-network` (bridge, `10.130.0.0/24`). The gateway is registered as a Foreman smart proxy at `https://localhost:24443`.
+
+```mermaid
+graph TB
+ subgraph Host["Host System"]
    Foreman["Foreman<br/>(foreman_rh_cloud)"]
+ Apache["Apache httpd"]
+ PG[(PostgreSQL)]
+
+ subgraph Network["iop-core-network (10.130.0.0/24)"]
+ Kafka[Kafka]
+
+ subgraph Core["Core Pipeline"]
+ Ingress[Ingress]
+ Puptoo[Puptoo]
+ Yuptoo[Yuptoo]
+ Engine[Engine]
+ end
+
      Gateway["Gateway<br/>:24443"]
+
+ subgraph Services["Application Services"]
        Inventory["Inventory API<br/>:8081"]
        Advisor["Advisor API<br/>:8000"]
        Remediation["Remediation API<br/>:3000"]
        VMAAS["VMAAS<br/>(reposcan + webapp)"]
        Vuln["Vulnerability<br/>(8 containers)"]
+ end
+ end
+
+ subgraph Frontends["Frontend Assets (/var/www/iop)"]
+ AdvisorFE[Advisor Frontend]
+ VulnFE[Vulnerability Frontend]
+ end
+ end
+
  Foreman -- "smart proxy<br/>relay" --> Gateway
+ Gateway --> Kafka
+ Apache -- "Alias" --> Frontends
+
+ Ingress --> Kafka
+ Puptoo --> Kafka
+ Yuptoo --> Kafka
+ Engine --> Kafka
+
+ Inventory --> Kafka
+ Inventory --> PG
+ Advisor --> Kafka
+ Advisor --> Inventory
+ Advisor -. "FDW" .-> PG
+ Remediation --> Advisor
+ Remediation --> Inventory
+ VMAAS --> PG
+ VMAAS --> Gateway
+ Vuln --> Kafka
+ Vuln --> Inventory
+ Vuln --> VMAAS
+ Vuln -. "FDW" .-> PG
+```
+
+### Services
+
+| Service | Container(s) | Port | Description |
+|---------|-------------|------|-------------|
+| kafka | `iop-core-kafka` | 9092 (internal) | Message broker (KRaft mode) |
+| ingress | `iop-core-ingress` | 8080 (internal) | Upload ingestion endpoint |
+| puptoo | `iop-core-puptoo` | - | Puppet/system facts processor |
+| yuptoo | `iop-core-yuptoo` | - | Yum/package data processor |
+| engine | `iop-core-engine` | - | Insights rules engine |
+| gateway | `iop-core-gateway` | 127.0.0.1:24443 | nginx proxy, smart proxy relay to Foreman |
+| inventory | `iop-core-host-inventory`, `iop-core-host-inventory-api` | 8081 (internal) | Host inventory with MQ consumer and REST API |
+| advisor | `iop-service-advisor-backend-api`, `iop-service-advisor-backend-service` | 8000 (internal) | Advisor recommendations |
+| remediation | `iop-service-remediations-api` | 3000 (host network) | Remediation playbook generation |
+| vmaas | `iop-service-vmaas-reposcan`, `iop-service-vmaas-webapp-go` | - | Vulnerability metadata and advisory sync |
+| vulnerability | 8 containers (manager, taskomatic, grouper, listener, evaluators, vmaas-sync) | 8443 (internal) | Vulnerability assessment pipeline |
+
+### Frontend Assets
+
+Advisor and vulnerability frontend assets are extracted from container images and served by Apache:
+
+- Assets are deployed to `/var/www/iop/assets/apps/{advisor,vulnerability}`
+- Apache serves them via `Alias` directives in `/etc/httpd/conf.d/05-foreman-ssl.d/`
+- Assets include gzip precompression support and 1-year cache headers
+
+### Databases
+
+IOP creates five PostgreSQL databases, all accessible to containers via `host.containers.internal:5432`:
+
+| Database | User |
+|----------|------|
+| `inventory_db` | `inventory_admin` |
+| `advisor_db` | `advisor_user` |
+| `remediations_db` | `remediations_user` |
+| `vmaas_db` | `vmaas_admin` |
+| `vulnerability_db` | `vulnerability_admin` |
+
+Advisor and vulnerability services use PostgreSQL foreign data wrappers (FDW) to query the inventory database directly.
+
+## Configuration
+
+### Foreman Connection
+
+Set in the playbook vars or inventory to match your Foreman deployment:
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `iop_core_foreman_url` | `https://{{ ansible_facts['fqdn'] }}` | Foreman server URL |
+| `iop_core_foreman_admin_username` | `admin` | Foreman admin username |
+| `iop_core_foreman_admin_password` | `changeme` | Foreman admin password |
+
+### Certificates
+
+Gateway certificates are configured per certificate source:
+
+**Default certificates** (`certificate_source: default`):
+- Server: `/root/certificates/certs/localhost.crt`
+- Client: `/root/certificates/certs/localhost-client.crt`
+- CA: `/root/certificates/certs/ca.crt`
+
+**Installer certificates** (`certificate_source: installer`):
+- Server: `/root/ssl-build/localhost/localhost-iop-core-gateway-server.crt`
+- Client: `/root/ssl-build/localhost/localhost-iop-core-gateway-client.crt`
+- CA: `/root/ssl-build/katello-default-ca.crt`
+
+### Container Images
+
+All IOP images default to `quay.io/iop/<service>:foreman-3.16`. Each role exposes `iop_<service>_container_image` and `iop_<service>_container_tag` variables to override.
diff --git a/src/features.yaml b/src/features.yaml
index 295139b13..5a77a1585 100644
--- a/src/features.yaml
+++ b/src/features.yaml
@@ -33,3 +33,14 @@ dynflow:
internal: true
foreman_proxy:
plugin_name: dynflow
+rh-cloud:
+ description: Connection to Red Hat Cloud
+ foreman:
+ plugin_name: foreman_rh_cloud
+ hammer: foreman_rh_cloud
+ dependencies:
+ - katello
+iop:
+ description: iop services
+ dependencies:
+ - rh-cloud
diff --git a/src/playbooks/deploy/deploy.yaml b/src/playbooks/deploy/deploy.yaml
index 89180f1bc..096c76519 100644
--- a/src/playbooks/deploy/deploy.yaml
+++ b/src/playbooks/deploy/deploy.yaml
@@ -12,6 +12,20 @@
- "../../vars/database.yml"
- "../../vars/foreman.yml"
- "../../vars/base.yaml"
+ pre_tasks:
+ - name: Add iop databases
+ when:
+ - "'iop' in enabled_features"
+ - database_mode == 'internal'
+ block:
+ - name: Include iop databases
+ ansible.builtin.include_vars:
+ file: "../../vars/database_iop.yml"
+
+ - name: Combine lists
+ ansible.builtin.set_fact:
+ postgresql_databases: "{{ postgresql_databases + iop_postgresql_databases }}"
+ postgresql_users: "{{ postgresql_users + iop_postgresql_users }}"
roles:
- role: pre_install
- role: checks
@@ -31,6 +45,10 @@
- pulp
- foreman
- role: systemd_target
+ - role: iop_core
+ when:
+ - "'iop' in enabled_features"
+ - database_mode == 'internal'
- role: foreman_proxy
when:
- "'foreman-proxy' in enabled_features"
diff --git a/src/roles/foreman/tasks/main.yaml b/src/roles/foreman/tasks/main.yaml
index 43139ead5..05650bc22 100644
--- a/src/roles/foreman/tasks/main.yaml
+++ b/src/roles/foreman/tasks/main.yaml
@@ -167,6 +167,8 @@
[Service]
Restart=on-failure
RestartSec=1
+ notify:
+ - Restart dynflow-sidekiq@
- name: Create Dynflow Container instances
ansible.builtin.file:
diff --git a/src/roles/iop_advisor/defaults/main.yaml b/src/roles/iop_advisor/defaults/main.yaml
new file mode 100644
index 000000000..0d7b0d1f7
--- /dev/null
+++ b/src/roles/iop_advisor/defaults/main.yaml
@@ -0,0 +1,9 @@
+---
+iop_advisor_container_image: "quay.io/iop/advisor-backend"
+iop_advisor_container_tag: "foreman-3.16"
+
+iop_advisor_database_name: advisor_db
+iop_advisor_database_user: advisor_user
+iop_advisor_database_password: CHANGEME
+iop_advisor_database_host: host.containers.internal
+iop_advisor_database_port: 5432
diff --git a/src/roles/iop_advisor/handlers/main.yaml b/src/roles/iop_advisor/handlers/main.yaml
new file mode 100644
index 000000000..126e16bf6
--- /dev/null
+++ b/src/roles/iop_advisor/handlers/main.yaml
@@ -0,0 +1,28 @@
+---
+- name: Check if advisor backend api service exists
+ ansible.builtin.systemd:
+ name: iop-service-advisor-backend-api
+ register: iop_advisor_api_service_status
+ failed_when: false
+ listen: restart advisor
+
+- name: Restart advisor backend api service if it exists
+ ansible.builtin.systemd:
+ name: iop-service-advisor-backend-api
+ state: restarted
+ when: iop_advisor_api_service_status.status is defined and iop_advisor_api_service_status.status.LoadState != "not-found"
+ listen: restart advisor
+
+- name: Check if advisor backend service exists
+ ansible.builtin.systemd:
+ name: iop-service-advisor-backend-service
+ register: iop_advisor_service_status
+ failed_when: false
+ listen: restart advisor
+
+- name: Restart advisor backend service if it exists
+ ansible.builtin.systemd:
+ name: iop-service-advisor-backend-service
+ state: restarted
+ when: iop_advisor_service_status.status is defined and iop_advisor_service_status.status.LoadState != "not-found"
+ listen: restart advisor
diff --git a/src/roles/iop_advisor/tasks/main.yaml b/src/roles/iop_advisor/tasks/main.yaml
new file mode 100644
index 000000000..ad26a342f
--- /dev/null
+++ b/src/roles/iop_advisor/tasks/main.yaml
@@ -0,0 +1,136 @@
+---
+- name: Pull Advisor Backend container image
+ containers.podman.podman_image:
+ name: "{{ iop_advisor_container_image }}:{{ iop_advisor_container_tag }}"
+ state: present
+
+- name: Create podman secret for advisor database username
+ containers.podman.podman_secret:
+ name: iop-service-advisor-backend-database-username
+ data: "{{ iop_advisor_database_user }}"
+ notify: restart advisor
+
+- name: Create podman secret for advisor database password
+ containers.podman.podman_secret:
+ name: iop-service-advisor-backend-database-password
+ data: "{{ iop_advisor_database_password }}"
+ notify: restart advisor
+
+- name: Create podman secret for advisor database name
+ containers.podman.podman_secret:
+ name: iop-service-advisor-backend-database-name
+ data: "{{ iop_advisor_database_name }}"
+ notify: restart advisor
+
+- name: Create podman secret for advisor database host
+ containers.podman.podman_secret:
+ name: iop-service-advisor-backend-database-host
+ data: "{{ iop_advisor_database_host }}"
+ notify: restart advisor
+
+- name: Create podman secret for advisor database port
+ containers.podman.podman_secret:
+ name: iop-service-advisor-backend-database-port
+ data: "{{ iop_advisor_database_port }}"
+ notify: restart advisor
+
+- name: Deploy Advisor Backend API Container
+ containers.podman.podman_container:
+ name: iop-service-advisor-backend-api
+ image: "{{ iop_advisor_container_image }}:{{ iop_advisor_container_tag }}"
+ state: quadlet
+ command: sh -c "./container_init.sh && api/app.sh"
+ network:
+ - iop-core-network
+ env:
+ DJANGO_SESSION_KEY: "UNUSED"
+ BOOTSTRAP_SERVERS: "iop-core-kafka:9092"
+ ADVISOR_ENV: "prod"
+ LOG_LEVEL: "INFO"
+ USE_DJANGO_WEBSERVER: "false"
+ CLOWDER_ENABLED: "false"
+ WEB_CONCURRENCY: "2"
+ ENABLE_AUTOSUB: "true"
+ TASKS_REWRITE_INTERNAL_URLS: "true"
+ TASKS_REWRITE_INTERNAL_URLS_FOR: "internal.localhost"
+ ENABLE_INIT_CONTAINER_MIGRATIONS: "true"
+ ENABLE_INIT_CONTAINER_IMPORT_CONTENT: "true"
+ IMAGE: "latest"
+ ALLOWED_HOSTS: "*"
+ INVENTORY_SERVER_URL: "http://iop-core-host-inventory-api:8081/api/inventory/v1"
+ ADVISOR_DB_SSL_MODE: "disable"
+ PORT: "8000"
+ REGISTRY_AUTH_FILE: "/etc/foreman/registry-auth.json"
+ secrets:
+ - 'iop-service-advisor-backend-database-username,type=env,target=ADVISOR_DB_USER'
+ - 'iop-service-advisor-backend-database-password,type=env,target=ADVISOR_DB_PASSWORD'
+ - 'iop-service-advisor-backend-database-name,type=env,target=ADVISOR_DB_NAME'
+ - 'iop-service-advisor-backend-database-host,type=env,target=ADVISOR_DB_HOST'
+ - 'iop-service-advisor-backend-database-port,type=env,target=ADVISOR_DB_PORT'
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Advisor Backend API
+ After=iop-core-kafka.service
+ Wants=iop-core-kafka.service
+ [Service]
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Deploy Advisor Backend Service Container
+ containers.podman.podman_container:
+ name: iop-service-advisor-backend-service
+ image: "{{ iop_advisor_container_image }}:{{ iop_advisor_container_tag }}"
+ state: quadlet
+ command: pipenv run python service/service.py
+ network:
+ - iop-core-network
+ env:
+ BOOTSTRAP_SERVERS: "iop-core-kafka:9092"
+ ADVISOR_DB_SSL_MODE: "disable"
+ DISABLE_WEB_SERVER: "true"
+ REGISTRY_AUTH_FILE: "/etc/foreman/registry-auth.json"
+ secrets:
+ - 'iop-service-advisor-backend-database-username,type=env,target=ADVISOR_DB_USER'
+ - 'iop-service-advisor-backend-database-password,type=env,target=ADVISOR_DB_PASSWORD'
+ - 'iop-service-advisor-backend-database-name,type=env,target=ADVISOR_DB_NAME'
+ - 'iop-service-advisor-backend-database-host,type=env,target=ADVISOR_DB_HOST'
+ - 'iop-service-advisor-backend-database-port,type=env,target=ADVISOR_DB_PORT'
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Advisor Backend Service
+ After=iop-core-kafka.service
+ Wants=iop-core-kafka.service
+ [Service]
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Advisor Backend API service
+ ansible.builtin.systemd:
+ name: iop-service-advisor-backend-api
+ enabled: true
+ state: started
+
+- name: Start Advisor Backend Service
+ ansible.builtin.systemd:
+ name: iop-service-advisor-backend-service
+ enabled: true
+ state: started
+
+- name: Set up Foreign Data Wrapper for advisor database
+ ansible.builtin.include_role:
+ name: iop_fdw
+ vars:
+ iop_fdw_database_name: "{{ iop_advisor_database_name }}"
+ iop_fdw_database_user: "{{ iop_advisor_database_user }}"
+ iop_fdw_database_password: "{{ iop_advisor_database_password }}"
+ iop_fdw_remote_database_name: "{{ iop_inventory_database_name }}"
+ iop_fdw_remote_user: "{{ iop_inventory_database_user }}"
+ iop_fdw_remote_password: "{{ iop_inventory_database_password }}"
diff --git a/src/roles/iop_advisor_frontend/defaults/main.yaml b/src/roles/iop_advisor_frontend/defaults/main.yaml
new file mode 100644
index 000000000..90f9adc2d
--- /dev/null
+++ b/src/roles/iop_advisor_frontend/defaults/main.yaml
@@ -0,0 +1,5 @@
+---
+iop_advisor_frontend_container_image: "quay.io/iop/advisor-frontend"
+iop_advisor_frontend_container_tag: "foreman-3.16"
+iop_advisor_frontend_assets_path: "/var/www/iop/assets/apps/advisor"
+iop_advisor_frontend_source_path: "/srv/dist/."
diff --git a/src/roles/iop_advisor_frontend/tasks/main.yaml b/src/roles/iop_advisor_frontend/tasks/main.yaml
new file mode 100644
index 000000000..35849771b
--- /dev/null
+++ b/src/roles/iop_advisor_frontend/tasks/main.yaml
@@ -0,0 +1,91 @@
+---
+- name: Pull Advisor Frontend container image
+ containers.podman.podman_image:
+ name: "{{ iop_advisor_frontend_container_image }}:{{ iop_advisor_frontend_container_tag }}"
+ state: present
+
+- name: Ensure parent assets directory exists
+ ansible.builtin.file:
+ path: /var/www/iop/assets/apps
+ state: directory
+ owner: root
+ group: root
+ mode: '0755'
+
+- name: Ensure assets directory exists
+ ansible.builtin.file:
+ path: "{{ iop_advisor_frontend_assets_path }}"
+ state: directory
+ owner: root
+ group: root
+ mode: '0755'
+
+- name: Create temporary container for asset extraction
+ containers.podman.podman_container:
+ name: iop-advisor-frontend-temp
+ image: "{{ iop_advisor_frontend_container_image }}:{{ iop_advisor_frontend_container_tag }}"
+ state: created
+
+- name: Extract advisor frontend assets from container
+ containers.podman.podman_container_copy:
+ container: iop-advisor-frontend-temp
+ src: "{{ iop_advisor_frontend_source_path }}"
+ dest: "{{ iop_advisor_frontend_assets_path }}"
+ from_container: true
+
+- name: Restore SELinux context for advisor frontend assets
+ ansible.builtin.command:
+ cmd: restorecon -R "{{ iop_advisor_frontend_assets_path }}"
+ when: ansible_facts['selinux']['status'] == "enabled"
+ changed_when: false
+
+- name: Remove temporary container
+ containers.podman.podman_container:
+ name: iop-advisor-frontend-temp
+ state: absent
+
+- name: Set ownership of advisor frontend assets
+ ansible.builtin.file:
+ path: "{{ iop_advisor_frontend_assets_path }}"
+ owner: root
+ group: root
+ recurse: true
+
+- name: Ensure Apache SSL config directory exists
+ ansible.builtin.file:
+ path: /etc/httpd/conf.d/05-foreman-ssl.d
+ state: directory
+ mode: '0755'
+
+- name: Configure Apache for advisor frontend assets
+ ansible.builtin.copy:
+ dest: /etc/httpd/conf.d/05-foreman-ssl.d/advisor-frontend.conf
+ content: |
+ # IOP Advisor Frontend Assets Configuration
+ Alias /assets/apps/advisor {{ iop_advisor_frontend_assets_path }}
+ ProxyPass /assets/apps/advisor !
+
+ <Directory "{{ iop_advisor_frontend_assets_path }}">
+ Options SymLinksIfOwnerMatch
+ AllowOverride None
+ Require all granted
+
+ # Use standard http expire header for assets instead of ETag
+ <IfModule mod_expires.c>
+ Header unset ETag
+ FileETag None
+ ExpiresActive On
+ ExpiresDefault "access plus 1 year"
+ </IfModule>
+
+ # Return compressed assets if they are precompiled
+ RewriteEngine On
+ # Make sure the browser supports gzip encoding and file with .gz added
+ # does exist on disc before we rewrite with the extension
+ RewriteCond %{HTTP:Accept-Encoding} \b(x-)?gzip\b
+ RewriteCond %{REQUEST_FILENAME} \.(css|js|svg)$
+ RewriteCond %{REQUEST_FILENAME}.gz -s
+ RewriteRule ^(.+) $1.gz [L]
+ </Directory>
+ mode: '0644'
+ notify: "httpd : Restart httpd"
diff --git a/src/roles/iop_core/defaults/main.yaml b/src/roles/iop_core/defaults/main.yaml
new file mode 100644
index 000000000..49d4ab6bd
--- /dev/null
+++ b/src/roles/iop_core/defaults/main.yaml
@@ -0,0 +1,4 @@
+---
+iop_core_foreman_url: "https://{{ ansible_facts['fqdn'] }}"
+iop_core_foreman_admin_username: admin
+iop_core_foreman_admin_password: changeme
diff --git a/src/roles/iop_core/tasks/main.yaml b/src/roles/iop_core/tasks/main.yaml
new file mode 100644
index 000000000..cc6867f35
--- /dev/null
+++ b/src/roles/iop_core/tasks/main.yaml
@@ -0,0 +1,65 @@
+---
+- name: Deploy IOP Network
+ ansible.builtin.include_role:
+ name: iop_network
+
+- name: Deploy IOP Kafka service
+ ansible.builtin.include_role:
+ name: iop_kafka
+
+- name: Deploy IOP Ingress service
+ ansible.builtin.include_role:
+ name: iop_ingress
+
+- name: Deploy IOP Puptoo service
+ ansible.builtin.include_role:
+ name: iop_puptoo
+
+- name: Deploy IOP Yuptoo service
+ ansible.builtin.include_role:
+ name: iop_yuptoo
+
+- name: Deploy IOP Engine service
+ ansible.builtin.include_role:
+ name: iop_engine
+
+- name: Deploy IOP Gateway service
+ ansible.builtin.include_role:
+ name: iop_gateway
+
+- name: Register IOP Gateway as smart proxy
+ theforeman.foreman.smart_proxy:
+ name: "iop-gateway"
+ url: "https://localhost:24443"
+ server_url: "{{ iop_core_foreman_url }}"
+ username: "{{ iop_core_foreman_admin_username }}"
+ password: "{{ iop_core_foreman_admin_password }}"
+ validate_certs: false
+
+- name: Deploy IOP Inventory service
+ ansible.builtin.include_role:
+ name: iop_inventory
+
+- name: Deploy IOP Advisor service
+ ansible.builtin.include_role:
+ name: iop_advisor
+
+- name: Deploy IOP Remediation service
+ ansible.builtin.include_role:
+ name: iop_remediation
+
+- name: Deploy IOP VMAAS service
+ ansible.builtin.include_role:
+ name: iop_vmaas
+
+- name: Deploy IOP Vulnerability service
+ ansible.builtin.include_role:
+ name: iop_vulnerability
+
+- name: Deploy IOP Advisor Frontend
+ ansible.builtin.include_role:
+ name: iop_advisor_frontend
+
+- name: Deploy IOP Vulnerability Frontend
+ ansible.builtin.include_role:
+ name: iop_vulnerability_frontend
diff --git a/src/roles/iop_engine/defaults/main.yaml b/src/roles/iop_engine/defaults/main.yaml
new file mode 100644
index 000000000..4a7922229
--- /dev/null
+++ b/src/roles/iop_engine/defaults/main.yaml
@@ -0,0 +1,8 @@
+---
+iop_engine_container_image: "quay.io/iop/insights-engine"
+iop_engine_container_tag: "foreman-3.16"
+
+iop_engine_packages:
+ - "insights.specs.default"
+ - "insights.specs.insights_archive"
+ - "insights_kafka_service.rules"
diff --git a/src/roles/iop_engine/handlers/main.yaml b/src/roles/iop_engine/handlers/main.yaml
new file mode 100644
index 000000000..c090e2051
--- /dev/null
+++ b/src/roles/iop_engine/handlers/main.yaml
@@ -0,0 +1,7 @@
+---
+- name: Restart engine
+ ansible.builtin.systemd:
+ name: iop-core-engine
+ state: restarted
+ listen: restart engine
+ when: ansible_facts.services['iop-core-engine.service'] is defined
diff --git a/src/roles/iop_engine/tasks/main.yaml b/src/roles/iop_engine/tasks/main.yaml
new file mode 100644
index 000000000..5bf70ee8b
--- /dev/null
+++ b/src/roles/iop_engine/tasks/main.yaml
@@ -0,0 +1,46 @@
+---
+- name: Pull Engine container image
+ containers.podman.podman_image:
+ name: "{{ iop_engine_container_image }}:{{ iop_engine_container_tag }}"
+ state: present
+
+- name: Create Engine config secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-engine-config-yml
+ data: "{{ lookup('ansible.builtin.template', 'engine/config.yml.j2') }}"
+ notify: restart engine
+
+- name: Deploy Engine container
+ containers.podman.podman_container:
+ name: iop-core-engine
+ image: "{{ iop_engine_container_image }}:{{ iop_engine_container_tag }}"
+ state: quadlet
+ command: insights-core-engine /var/config.yml
+ secrets:
+ - 'iop-core-engine-config-yml,target=/var/config.yml,mode=0440,uid=1000,type=mount'
+ etc_hosts:
+ console.redhat.com: "127.0.0.1"
+ network:
+ - iop-core-network
+ quadlet_options:
+ - |
+ [Unit]
+ Description=IOP Core Engine Container
+ After=iop-core-kafka.service iop-core-ingress.service iop-core-puptoo.service
+ Wants=iop-core-kafka.service iop-core-ingress.service iop-core-puptoo.service
+ [Service]
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Engine service
+ ansible.builtin.systemd:
+ name: iop-core-engine
+ enabled: true
+ state: started
diff --git a/src/roles/iop_engine/templates/engine/config.yml.j2 b/src/roles/iop_engine/templates/engine/config.yml.j2
new file mode 100644
index 000000000..a62b79022
--- /dev/null
+++ b/src/roles/iop_engine/templates/engine/config.yml.j2
@@ -0,0 +1,33 @@
+plugins:
+ default_component_enabled: true
+ packages:
+{% for package in iop_engine_packages %}
+ - {{ package }}
+{% endfor %}
+configs: []
+service:
+ extract_timeout: 10
+ unpacked_archive_size_limit: 1800000000
+ extract_tmp_dir:
+ format: insights_kafka_service.formats._insights.InsightsFormat
+ target_components: []
+ consumer:
+ name: "insights_kafka_service.consumer.InsightsKafkaConsumer"
+ kwargs:
+ services:
+ - "advisor"
+ group_id: "insights-core-kafka"
+ queued.max.messages.kbytes: 10000
+ session.timeout.ms: 30000
+ max.poll.interval.ms: 600000
+ bootstrap_servers:
+ - "iop-core-kafka:9092"
+ incoming_topic: platform.inventory.events
+ publisher:
+ name: "insights_kafka_service.producer.InsightsKafkaProducer"
+ kwargs:
+ bootstrap_servers:
+ - "iop-core-kafka:9092"
+ topic: platform.engine.results
+ downloader:
+ name: "insights_messaging.downloaders.httpfs.Http"
diff --git a/src/roles/iop_fdw/defaults/main.yaml b/src/roles/iop_fdw/defaults/main.yaml
new file mode 100644
index 000000000..4b669addf
--- /dev/null
+++ b/src/roles/iop_fdw/defaults/main.yaml
@@ -0,0 +1,20 @@
+---
+# Required parameters - must be passed by caller
+iop_fdw_database_name: ""
+iop_fdw_database_user: ""
+iop_fdw_database_password: ""
+iop_fdw_remote_database_name: ""
+iop_fdw_remote_user: ""
+iop_fdw_remote_password: ""
+
+# Optional parameters - can use defaults
+iop_fdw_database_host: "localhost"
+iop_fdw_database_port: 5432
+
+# Constants - same for all invocations (matching puppet-iop)
+iop_fdw_foreign_server_name: hbi_server
+iop_fdw_remote_table_schema: inventory
+iop_fdw_remote_table_name: hosts
+iop_fdw_local_source_schema: inventory_source
+iop_fdw_local_view_schema: inventory
+iop_fdw_local_view_name: hosts
diff --git a/src/roles/iop_fdw/handlers/main.yaml b/src/roles/iop_fdw/handlers/main.yaml
new file mode 100644
index 000000000..1aeb62a47
--- /dev/null
+++ b/src/roles/iop_fdw/handlers/main.yaml
@@ -0,0 +1,3 @@
+---
+# Handlers for iop_fdw role
+# Currently no specific handlers needed for FDW operations
diff --git a/src/roles/iop_fdw/tasks/main.yaml b/src/roles/iop_fdw/tasks/main.yaml
new file mode 100644
index 000000000..ec21087f7
--- /dev/null
+++ b/src/roles/iop_fdw/tasks/main.yaml
@@ -0,0 +1,165 @@
+---
+- name: Install PostgreSQL client for FDW operations
+ ansible.builtin.package:
+ name: postgresql
+ state: present
+
+- name: Enable postgres_fdw extension on target database
+ community.postgresql.postgresql_ext:
+ name: postgres_fdw
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+
+- name: Check if foreign server exists
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: "SELECT srvname FROM pg_foreign_server WHERE srvname = %s"
+ positional_args:
+ - "{{ iop_fdw_foreign_server_name }}"
+ register: iop_fdw_foreign_server_check
+ changed_when: false
+
+- name: Create foreign server for inventory database
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: |
+ CREATE SERVER {{ iop_fdw_foreign_server_name }}
+ FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS (host %s, port %s, dbname %s)
+ positional_args:
+ - "{{ iop_fdw_database_host }}"
+ - "{{ iop_fdw_database_port | string }}"
+ - "{{ iop_fdw_remote_database_name }}"
+ when: iop_fdw_foreign_server_check.rowcount == 0
+
+- name: Check if user mapping exists for service user
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: "SELECT umuser FROM pg_user_mappings WHERE srvname = %s AND usename = %s"
+ positional_args:
+ - "{{ iop_fdw_foreign_server_name }}"
+ - "{{ iop_fdw_database_user }}"
+ register: iop_fdw_user_mapping_check
+ changed_when: false
+
+- name: Create user mapping for service user
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: |
+ CREATE USER MAPPING FOR {{ iop_fdw_database_user }}
+ SERVER {{ iop_fdw_foreign_server_name }}
+ OPTIONS (user %s, password %s)
+ positional_args:
+ - "{{ iop_fdw_remote_user }}"
+ - "{{ iop_fdw_remote_password }}"
+ when: iop_fdw_user_mapping_check.rowcount == 0
+
+- name: Check if user mapping exists for postgres user
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: "SELECT umuser FROM pg_user_mappings WHERE srvname = %s AND usename = 'postgres'"
+ positional_args:
+ - "{{ iop_fdw_foreign_server_name }}"
+ register: iop_fdw_postgres_mapping_check
+ changed_when: false
+
+- name: Create user mapping for postgres user
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: |
+ CREATE USER MAPPING FOR postgres
+ SERVER {{ iop_fdw_foreign_server_name }}
+ OPTIONS (user %s, password %s)
+ positional_args:
+ - "{{ iop_fdw_remote_user }}"
+ - "{{ iop_fdw_remote_password }}"
+ when: iop_fdw_postgres_mapping_check.rowcount == 0
+
+- name: Grant usage on foreign server
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: "GRANT USAGE ON FOREIGN SERVER {{ iop_fdw_foreign_server_name }} TO {{ iop_fdw_database_user }}"
+
+- name: Create local view schema
+ community.postgresql.postgresql_schema:
+ db: "{{ iop_fdw_database_name }}"
+ name: "{{ iop_fdw_local_view_schema }}"
+ owner: "{{ iop_fdw_database_user }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+
+- name: Create local schema for foreign tables
+ community.postgresql.postgresql_schema:
+ db: "{{ iop_fdw_database_name }}"
+ name: "{{ iop_fdw_local_source_schema }}"
+ owner: "{{ iop_fdw_database_user }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+
+- name: Check if foreign table exists
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: "SELECT foreign_table_name FROM information_schema.foreign_tables WHERE foreign_table_schema = %s AND foreign_table_name = %s"
+ positional_args:
+ - "{{ iop_fdw_local_source_schema }}"
+ - "{{ iop_fdw_remote_table_name }}"
+ register: iop_fdw_foreign_table_check
+ changed_when: false
+
+- name: Import foreign schema
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: |
+ IMPORT FOREIGN SCHEMA {{ iop_fdw_remote_table_schema }}
+ LIMIT TO ({{ iop_fdw_remote_table_name }})
+ FROM SERVER {{ iop_fdw_foreign_server_name }}
+ INTO {{ iop_fdw_local_source_schema }}
+ when: iop_fdw_foreign_table_check.rowcount == 0
+
+- name: Create local view pointing to foreign table
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: |
+ CREATE OR REPLACE VIEW "{{ iop_fdw_local_view_schema }}"."{{ iop_fdw_local_view_name }}" AS
+ SELECT * FROM "{{ iop_fdw_local_source_schema }}"."{{ iop_fdw_remote_table_name }}"
+
+- name: Grant select on foreign table to service user
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: |
+ GRANT USAGE ON SCHEMA {{ iop_fdw_local_source_schema }} TO {{ iop_fdw_database_user }};
+ GRANT USAGE ON SCHEMA {{ iop_fdw_local_view_schema }} TO {{ iop_fdw_database_user }};
+ GRANT SELECT ON {{ iop_fdw_local_source_schema }}.{{ iop_fdw_remote_table_name }} TO {{ iop_fdw_database_user }};
+ GRANT SELECT ON {{ iop_fdw_local_view_schema }}.{{ iop_fdw_local_view_name }} TO {{ iop_fdw_database_user }};
+
+- name: Grant permissions on remote database view to remote user
+ community.postgresql.postgresql_query:
+ db: "{{ iop_fdw_remote_database_name }}"
+ login_user: postgres
+ login_host: "{{ iop_fdw_database_host }}"
+ query: |
+ GRANT USAGE ON SCHEMA {{ iop_fdw_remote_table_schema }} TO {{ iop_fdw_remote_user }};
+ GRANT SELECT ON {{ iop_fdw_remote_table_schema }}.{{ iop_fdw_remote_table_name }} TO {{ iop_fdw_remote_user }};
diff --git a/src/roles/iop_gateway/defaults/main.yaml b/src/roles/iop_gateway/defaults/main.yaml
new file mode 100644
index 000000000..f0c893c8c
--- /dev/null
+++ b/src/roles/iop_gateway/defaults/main.yaml
@@ -0,0 +1,11 @@
+---
+iop_gateway_container_image: "quay.io/iop/gateway"
+iop_gateway_container_tag: "foreman-3.16"
+
+# Certificate paths - gateway server uses localhost certs to match puppet-iop behavior
+iop_gateway_server_certificate: "/root/certificates/certs/localhost.crt"
+iop_gateway_server_key: "/root/certificates/private/localhost.key"
+iop_gateway_server_ca_certificate: "/root/certificates/certs/ca.crt"
+iop_gateway_client_certificate: "/root/certificates/certs/localhost-client.crt"
+iop_gateway_client_key: "/root/certificates/private/localhost-client.key"
+iop_gateway_client_ca_certificate: "/root/certificates/certs/ca.crt"
diff --git a/src/roles/iop_gateway/handlers/main.yaml b/src/roles/iop_gateway/handlers/main.yaml
new file mode 100644
index 000000000..40d60d7f8
--- /dev/null
+++ b/src/roles/iop_gateway/handlers/main.yaml
@@ -0,0 +1,14 @@
+---
+- name: Check if gateway service exists
+ ansible.builtin.systemd:
+ name: iop-core-gateway
+ register: iop_gateway_service_status
+ failed_when: false
+ listen: restart gateway
+
+- name: Restart gateway service if it exists
+ ansible.builtin.systemd:
+ name: iop-core-gateway
+ state: restarted
+ when: iop_gateway_service_status.status is defined and iop_gateway_service_status.status.LoadState != "not-found"
+ listen: restart gateway
diff --git a/src/roles/iop_gateway/tasks/main.yaml b/src/roles/iop_gateway/tasks/main.yaml
new file mode 100644
index 000000000..13804a9e5
--- /dev/null
+++ b/src/roles/iop_gateway/tasks/main.yaml
@@ -0,0 +1,95 @@
+---
+- name: Pull Gateway container image
+ containers.podman.podman_image:
+ name: "{{ iop_gateway_container_image }}:{{ iop_gateway_container_tag }}"
+ state: present
+
+- name: Create Gateway server certificate secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-gateway-server-cert
+ path: "{{ iop_gateway_server_certificate }}"
+ notify: restart gateway
+
+- name: Create Gateway server key secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-gateway-server-key
+ path: "{{ iop_gateway_server_key }}"
+ notify: restart gateway
+
+- name: Create Gateway server CA certificate secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-gateway-server-ca-cert
+ path: "{{ iop_gateway_server_ca_certificate }}"
+ notify: restart gateway
+
+- name: Create Gateway client certificate secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-gateway-client-cert
+ path: "{{ iop_gateway_client_certificate }}"
+ notify: restart gateway
+
+- name: Create Gateway client key secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-gateway-client-key
+ path: "{{ iop_gateway_client_key }}"
+ notify: restart gateway
+
+- name: Create Gateway client CA certificate secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-gateway-client-ca-cert
+ path: "{{ iop_gateway_client_ca_certificate }}"
+ notify: restart gateway
+
+- name: Create Gateway relay configuration secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-gateway-relay-conf
+ data: "{{ lookup('ansible.builtin.template', 'relay.conf.j2') }}"
+ notify: restart gateway
+
+- name: Deploy Gateway container
+ containers.podman.podman_container:
+ name: iop-core-gateway
+ image: "{{ iop_gateway_container_image }}:{{ iop_gateway_container_tag }}"
+ state: quadlet
+ network:
+ - iop-core-network
+ publish:
+ - "127.0.0.1:24443:8443"
+ env:
+ REGISTRY_AUTH_FILE: "/etc/foreman/registry-auth.json"
+ secrets:
+ - 'iop-core-gateway-server-cert,target=/etc/nginx/certs/nginx.crt,mode=0440,uid=998,gid=998,type=mount'
+ - 'iop-core-gateway-server-key,target=/etc/nginx/certs/nginx.key,mode=0440,uid=998,gid=998,type=mount'
+ - 'iop-core-gateway-server-ca-cert,target=/etc/nginx/certs/ca.crt,mode=0440,uid=998,gid=998,type=mount'
+ - 'iop-core-gateway-client-cert,target=/etc/nginx/smart-proxy-relay/certs/proxy.crt,mode=0440,uid=998,gid=998,type=mount'
+ - 'iop-core-gateway-client-key,target=/etc/nginx/smart-proxy-relay/certs/proxy.key,mode=0440,uid=998,gid=998,type=mount'
+ - 'iop-core-gateway-client-ca-cert,target=/etc/nginx/smart-proxy-relay/certs/ca.crt,mode=0440,uid=998,gid=998,type=mount'
+ - 'iop-core-gateway-relay-conf,target=/etc/nginx/smart-proxy-relay/relay.conf,mode=0440,uid=998,gid=998,type=mount'
+ quadlet_options:
+ - |
+ [Unit]
+ Description=IOP Core Gateway Container
+ After=iop-core-kafka.service iop-core-engine.service iop-core-ingress.service
+ Wants=iop-core-kafka.service iop-core-engine.service iop-core-ingress.service
+ [Service]
+ Restart=on-failure
+ [Install]
+ WantedBy=multi-user.target
+ WantedBy=default.target
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Gateway service
+ ansible.builtin.systemd:
+ name: iop-core-gateway
+ enabled: true
+ state: started
diff --git a/src/roles/iop_gateway/templates/relay.conf.j2 b/src/roles/iop_gateway/templates/relay.conf.j2
new file mode 100644
index 000000000..8d13704ae
--- /dev/null
+++ b/src/roles/iop_gateway/templates/relay.conf.j2
@@ -0,0 +1,6 @@
+# (REQUIRED) CName of the Foreman instance (must match Foreman's TLS certificate)
+proxy_ssl_name "{{ foreman_servername | default(ansible_fqdn) }}";
+
+# URI to Foreman
+# host.containers.internal resolves to the container network gateway (the host).
+proxy_pass "https://host.containers.internal";
diff --git a/src/roles/iop_ingress/defaults/main.yaml b/src/roles/iop_ingress/defaults/main.yaml
new file mode 100644
index 000000000..eeefa0b31
--- /dev/null
+++ b/src/roles/iop_ingress/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+iop_ingress_container_image: "quay.io/iop/ingress"
+iop_ingress_container_tag: "foreman-3.16"
diff --git a/src/roles/iop_ingress/handlers/main.yaml b/src/roles/iop_ingress/handlers/main.yaml
new file mode 100644
index 000000000..c00101a0d
--- /dev/null
+++ b/src/roles/iop_ingress/handlers/main.yaml
@@ -0,0 +1,14 @@
+---
+- name: Check if ingress service exists
+ ansible.builtin.systemd:
+ name: iop-core-ingress
+ register: iop_ingress_service_status
+ failed_when: false
+ listen: restart ingress
+
+- name: Restart ingress service if it exists
+ ansible.builtin.systemd:
+ name: iop-core-ingress
+ state: restarted
+ when: iop_ingress_service_status.status is defined and iop_ingress_service_status.status.LoadState != "not-found"
+ listen: restart ingress
diff --git a/src/roles/iop_ingress/tasks/main.yaml b/src/roles/iop_ingress/tasks/main.yaml
new file mode 100644
index 000000000..dc461587e
--- /dev/null
+++ b/src/roles/iop_ingress/tasks/main.yaml
@@ -0,0 +1,40 @@
+---
+- name: Pull Ingress container image
+ containers.podman.podman_image:
+ name: "{{ iop_ingress_container_image }}:{{ iop_ingress_container_tag }}"
+ state: present
+
+- name: Deploy Ingress container
+ containers.podman.podman_container:
+ name: iop-core-ingress
+ image: "{{ iop_ingress_container_image }}:{{ iop_ingress_container_tag }}"
+ state: quadlet
+ env:
+ INGRESS_VALID_UPLOAD_TYPES: "advisor,compliance,qpc,rhv,tower,leapp-reporting,xavier,playbook,playbook-sat,malware-detection,tasks"
+ INGRESS_KAFKA_BROKERS: "iop-core-kafka:9092"
+ BOOTSTRAP_SERVERS: "iop-core-kafka:9092"
+ INGRESS_STAGERIMPLEMENTATION: "filebased"
+ INGRESS_STORAGEFILESYSTEMPATH: "/var/tmp"
+ INGRESS_SERVICEBASEURL: "http://localhost:8080"
+ INGRESS_WEBPORT: "8080"
+ INGRESS_METRICSPORT: "3001"
+ network:
+ - iop-core-network
+ quadlet_options:
+ - |
+ [Unit]
+ Description=IOP Core Ingress Container
+ [Service]
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Ingress service
+ ansible.builtin.systemd:
+ name: iop-core-ingress
+ enabled: true
+ state: started
diff --git a/src/roles/iop_inventory/defaults/main.yaml b/src/roles/iop_inventory/defaults/main.yaml
new file mode 100644
index 000000000..f04ea3a1d
--- /dev/null
+++ b/src/roles/iop_inventory/defaults/main.yaml
@@ -0,0 +1,9 @@
+---
+iop_inventory_container_image: "quay.io/iop/host-inventory"
+iop_inventory_container_tag: "foreman-3.16"
+
+iop_inventory_database_name: inventory_db
+iop_inventory_database_user: inventory_admin
+iop_inventory_database_password: CHANGEME
+iop_inventory_database_host: host.containers.internal
+iop_inventory_database_port: 5432
diff --git a/src/roles/iop_inventory/handlers/main.yaml b/src/roles/iop_inventory/handlers/main.yaml
new file mode 100644
index 000000000..356613290
--- /dev/null
+++ b/src/roles/iop_inventory/handlers/main.yaml
@@ -0,0 +1,20 @@
+---
+- name: Check if inventory services exist
+ ansible.builtin.systemd:
+ name: "{{ item }}"
+ register: iop_inventory_services_status
+ failed_when: false
+ listen: restart inventory
+ loop:
+ - iop-core-host-inventory-migrate
+ - iop-core-host-inventory
+ - iop-core-host-inventory-api
+ - iop-core-host-inventory-cleanup
+
+- name: Restart inventory services if they exist
+ ansible.builtin.systemd:
+ name: "{{ item.item }}"
+ state: restarted
+ when: item.status is defined and item.status.LoadState != "not-found"
+ listen: restart inventory
+ loop: "{{ iop_inventory_services_status.results }}"
diff --git a/src/roles/iop_inventory/tasks/main.yaml b/src/roles/iop_inventory/tasks/main.yaml
new file mode 100644
index 000000000..f03c61cc0
--- /dev/null
+++ b/src/roles/iop_inventory/tasks/main.yaml
@@ -0,0 +1,244 @@
+---
+- name: Pull Host Inventory container image
+ containers.podman.podman_image:
+ name: "{{ iop_inventory_container_image }}:{{ iop_inventory_container_tag }}"
+ state: present
+
+- name: Create podman secret for inventory database username
+ containers.podman.podman_secret:
+ name: iop-core-host-inventory-database-username
+ data: "{{ iop_inventory_database_user }}"
+ notify: restart inventory
+
+- name: Create podman secret for inventory database password
+ containers.podman.podman_secret:
+ name: iop-core-host-inventory-database-password
+ data: "{{ iop_inventory_database_password }}"
+ notify: restart inventory
+
+- name: Create podman secret for inventory database name
+ containers.podman.podman_secret:
+ name: iop-core-host-inventory-database-name
+ data: "{{ iop_inventory_database_name }}"
+ notify: restart inventory
+
+- name: Create podman secret for inventory database host
+ containers.podman.podman_secret:
+ name: iop-core-host-inventory-database-host
+ data: "{{ iop_inventory_database_host }}"
+ notify: restart inventory
+
+- name: Create podman secret for inventory database port
+ containers.podman.podman_secret:
+ name: iop-core-host-inventory-database-port
+ data: "{{ iop_inventory_database_port }}"
+ notify: restart inventory
+
+- name: Deploy Host Inventory Database Migration Container
+ containers.podman.podman_container:
+ name: iop-core-host-inventory-migrate
+ image: "{{ iop_inventory_container_image }}:{{ iop_inventory_container_tag }}"
+ state: quadlet
+ command: make upgrade_db
+ network:
+ - iop-core-network
+ env:
+ KAFKA_BOOTSTRAP_SERVERS: "PLAINTEXT://iop-core-kafka:9092"
+ USE_SUBMAN_ID: "true"
+ INVENTORY_DB_SSL_MODE: "disable"
+ REGISTRY_AUTH_FILE: "/etc/foreman/registry-auth.json"
+ secrets:
+ - 'iop-core-host-inventory-database-username,type=env,target=INVENTORY_DB_USER'
+ - 'iop-core-host-inventory-database-password,type=env,target=INVENTORY_DB_PASS'
+ - 'iop-core-host-inventory-database-name,type=env,target=INVENTORY_DB_NAME'
+ - 'iop-core-host-inventory-database-host,type=env,target=INVENTORY_DB_HOST'
+ - 'iop-core-host-inventory-database-port,type=env,target=INVENTORY_DB_PORT'
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Database Readiness and Migration Init Container
+ [Service]
+ Type=oneshot
+ RemainAfterExit=true
+ [Install]
+ WantedBy=default.target
+
+- name: Deploy Host Inventory MQ Service Container
+ containers.podman.podman_container:
+ name: iop-core-host-inventory
+ image: "{{ iop_inventory_container_image }}:{{ iop_inventory_container_tag }}"
+ state: quadlet
+ command: make run_inv_mq_service
+ network:
+ - iop-core-network
+ env:
+ KAFKA_BOOTSTRAP_SERVERS: "PLAINTEXT://iop-core-kafka:9092"
+ USE_SUBMAN_ID: "true"
+ INVENTORY_DB_SSL_MODE: "disable"
+ REGISTRY_AUTH_FILE: "/etc/foreman/registry-auth.json"
+ secrets:
+ - 'iop-core-host-inventory-database-username,type=env,target=INVENTORY_DB_USER'
+ - 'iop-core-host-inventory-database-password,type=env,target=INVENTORY_DB_PASS'
+ - 'iop-core-host-inventory-database-name,type=env,target=INVENTORY_DB_NAME'
+ - 'iop-core-host-inventory-database-host,type=env,target=INVENTORY_DB_HOST'
+ - 'iop-core-host-inventory-database-port,type=env,target=INVENTORY_DB_PORT'
+ quadlet_options:
+ - |
+ [Unit]
+ Description=IOP Core Host-Based Inventory Container
+ After=network-online.target iop-core-host-inventory-migrate.service
+ Requires=iop-core-host-inventory-migrate.service
+ [Service]
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Deploy Host Inventory API Container
+ containers.podman.podman_container:
+ name: iop-core-host-inventory-api
+ image: "{{ iop_inventory_container_image }}:{{ iop_inventory_container_tag }}"
+ state: quadlet
+ command: python run_gunicorn.py
+ network:
+ - iop-core-network
+ env:
+ KAFKA_BOOTSTRAP_SERVERS: "iop-core-kafka:9092"
+ LISTEN_PORT: "8081"
+ BYPASS_RBAC: "true"
+ USE_SUBMAN_ID: "true"
+ INVENTORY_DB_SSL_MODE: "disable"
+ REGISTRY_AUTH_FILE: "/etc/foreman/registry-auth.json"
+ secrets:
+ - 'iop-core-host-inventory-database-username,type=env,target=INVENTORY_DB_USER'
+ - 'iop-core-host-inventory-database-password,type=env,target=INVENTORY_DB_PASS'
+ - 'iop-core-host-inventory-database-name,type=env,target=INVENTORY_DB_NAME'
+ - 'iop-core-host-inventory-database-host,type=env,target=INVENTORY_DB_HOST'
+ - 'iop-core-host-inventory-database-port,type=env,target=INVENTORY_DB_PORT'
+ quadlet_options:
+ - |
+ [Unit]
+ Description=IOP Core Host-Based Inventory Web Container
+ [Service]
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Deploy Host Inventory Cleanup Container
+ containers.podman.podman_container:
+ name: iop-core-host-inventory-cleanup
+ image: "{{ iop_inventory_container_image }}:{{ iop_inventory_container_tag }}"
+ state: quadlet
+ command: make run_host_delete_access_tags
+ network:
+ - iop-core-network
+ env:
+ KAFKA_BOOTSTRAP_SERVERS: "PLAINTEXT://iop-core-kafka:9092"
+ USE_SUBMAN_ID: "true"
+ INVENTORY_DB_SSL_MODE: "disable"
+ PYTHONPATH: "/opt/app-root/src"
+ REGISTRY_AUTH_FILE: "/etc/foreman/registry-auth.json"
+ secrets:
+ - 'iop-core-host-inventory-database-username,type=env,target=INVENTORY_DB_USER'
+ - 'iop-core-host-inventory-database-password,type=env,target=INVENTORY_DB_PASS'
+ - 'iop-core-host-inventory-database-name,type=env,target=INVENTORY_DB_NAME'
+ - 'iop-core-host-inventory-database-host,type=env,target=INVENTORY_DB_HOST'
+ - 'iop-core-host-inventory-database-port,type=env,target=INVENTORY_DB_PORT'
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Host Inventory Access Tags Cleanup Job
+ Wants=iop-core-host-inventory-api.service
+ After=iop-core-host-inventory-api.service
+
+- name: Create Host Inventory Cleanup Timer
+ ansible.builtin.copy:
+ dest: /etc/systemd/system/iop-core-host-inventory-cleanup.timer
+ content: |
+ [Unit]
+ Description=Host Inventory Access Tags Cleanup Timer
+
+ [Timer]
+ OnBootSec=10min
+ OnUnitActiveSec=24h
+ Persistent=true
+ RandomizedDelaySec=300
+
+ [Install]
+ WantedBy=timers.target
+ mode: '0644'
+ notify: restart inventory
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Host Inventory Migration service
+ ansible.builtin.systemd:
+ name: iop-core-host-inventory-migrate
+ enabled: true
+ state: started
+
+- name: Start Host Inventory MQ service
+ ansible.builtin.systemd:
+ name: iop-core-host-inventory
+ enabled: true
+ state: started
+
+- name: Start Host Inventory API service
+ ansible.builtin.systemd:
+ name: iop-core-host-inventory-api
+ enabled: true
+ state: started
+
+- name: Enable Host Inventory Cleanup Timer
+ ansible.builtin.systemd:
+ name: iop-core-host-inventory-cleanup.timer
+ enabled: true
+ state: started
+
+- name: Install PostgreSQL client for FDW operations
+ ansible.builtin.package:
+ name: postgresql
+ state: present
+
+- name: Enable postgres_fdw extension on inventory database
+ community.postgresql.postgresql_ext:
+ name: postgres_fdw
+ db: "{{ iop_inventory_database_name }}"
+ login_user: postgres
+ login_password: "{{ postgresql_admin_password }}"
+ login_host: localhost
+
+- name: Create inventory schema in inventory database
+ community.postgresql.postgresql_schema:
+ db: "{{ iop_inventory_database_name }}"
+ name: inventory
+ owner: "{{ iop_inventory_database_user }}"
+ login_user: postgres
+ login_password: "{{ postgresql_admin_password }}"
+ login_host: localhost
+
+- name: Create inventory.hosts view in inventory database
+ community.postgresql.postgresql_query:
+ db: "{{ iop_inventory_database_name }}"
+ login_user: postgres
+ login_password: "{{ postgresql_admin_password }}"
+ login_host: localhost
+ query: |
+ CREATE OR REPLACE VIEW "inventory"."hosts" AS SELECT
+ id,
+ account,
+ display_name,
+ created_on as created,
+ modified_on as updated,
+ stale_timestamp,
+ stale_timestamp + INTERVAL '1' DAY * '7' AS stale_warning_timestamp,
+ stale_timestamp + INTERVAL '1' DAY * '14' AS culled_timestamp,
+ tags_alt as tags,
+ system_profile_facts as system_profile,
+ (canonical_facts ->> 'insights_id')::uuid as insights_id,
+ reporter,
+ per_reporter_staleness,
+ org_id,
+ groups
+ FROM hbi.hosts WHERE (canonical_facts->'insights_id' IS NOT NULL);
diff --git a/src/roles/iop_kafka/defaults/main.yaml b/src/roles/iop_kafka/defaults/main.yaml
new file mode 100644
index 000000000..176858bc7
--- /dev/null
+++ b/src/roles/iop_kafka/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+iop_kafka_container_image: "quay.io/strimzi/kafka"
+iop_kafka_container_tag: "latest-kafka-3.7.1"
diff --git a/src/roles/iop_kafka/files/kafka/init b/src/roles/iop_kafka/files/kafka/init
new file mode 100644
index 000000000..c4238f820
--- /dev/null
+++ b/src/roles/iop_kafka/files/kafka/init
@@ -0,0 +1,125 @@
+#!/bin/bash
+
+usage() {
+ echo "Usage: [--create | --check]"
+ echo " --create : Perform the creation action."
+ echo " --check : Perform the check action."
+ exit 1
+}
+
+topics=(
+ "platform.engine.results"
+ "platform.insights.rule-hits"
+ "platform.insights.rule-deactivation"
+ "platform.inventory.events"
+ "platform.inventory.host-ingress"
+ "platform.sources.event-stream"
+ "platform.playbook-dispatcher.runs"
+ "platform.upload.announce"
+ "platform.upload.validation"
+ "platform.logging.logs"
+ "platform.payload-status"
+ "platform.remediation-updates.vulnerability"
+ "vulnerability.evaluator.results"
+ "vulnerability.evaluator.recalc"
+ "vulnerability.evaluator.upload"
+ "vulnerability.grouper.inventory.upload"
+ "vulnerability.grouper.advisor.upload"
+)
+
+if ! [[ -v IOP_CORE_KAFKA ]]; then
+ IOP_CORE_KAFKA=iop-core-kafka
+fi
+
+kafka_cmd="./bin/kafka-topics.sh"
+kafka_bootstrap_server=$IOP_CORE_KAFKA:9092
+
+create_topics() {
+
+ echo -e "====================="
+ echo -e "Creating Kafka topics:"
+ for topic in "${topics[@]}"; do
+ echo -e "Creating topic ""$topic"
+ $kafka_cmd --create --if-not-exists --topic "$topic" --bootstrap-server $kafka_bootstrap_server --partitions 1 --replication-factor 1 &
+ done
+ wait
+
+ echo -e "=========================="
+ echo -e "Listing all Kafka topics:"
+ $kafka_cmd --bootstrap-server $kafka_bootstrap_server --list
+}
+
+check_all_kafka_topics_exist() {
+ echo "Using Kafka command: $kafka_cmd" >&2
+
+ echo "Attempting to fetch existing topics..." >&2
+ local existing_topics_list
+ local list_output
+ list_output=$("$kafka_cmd" --bootstrap-server "$kafka_bootstrap_server" --list 2>&1)
+ local list_exit_code=$?
+
+ if [ $list_exit_code -ne 0 ]; then
+ echo "--------------------------------------------------" >&2
+ echo "Error: Failed to connect to Kafka or list topics." >&2
+ echo "Command failed: $kafka_cmd --bootstrap-server \"$kafka_bootstrap_server\" --list" >&2
+ echo "Exit code: $list_exit_code" >&2
+ echo "Output/Error:" >&2
+ echo "$list_output" >&2
+ echo "--------------------------------------------------" >&2
+ return 1
+ fi
+ existing_topics_list="$list_output"
+ echo "Successfully retrieved topic list." >&2
+
+ local missing_count=0
+ local missing_list=()
+
+ echo "Checking if all required topics exist..." >&2
+ for topic in "${topics[@]}"; do
+ if ! echo "$existing_topics_list" | grep -F -q -x "$topic"; then
+ echo " - Required topic '$topic' is MISSING." >&2
+ missing_list+=("$topic")
+ ((missing_count++))
+ else
+ echo " - Required topic '$topic' exists." >&2
+ fi
+ done
+
+ if [ $missing_count -eq 0 ]; then
+ echo "Result: All ${#topics[@]} required topics exist." >&2
+ return 0
+ else
+ echo "Result: Found $missing_count missing required topic(s)." >&2
+ echo "Missing topics:" >&2
+ printf " - %s\n" "${missing_list[@]}" >&2
+ return 2
+ fi
+}
+
+if [ "$#" -lt 1 ]; then
+ echo "Error: No operation specified." >&2
+ usage
+fi
+
+MODE="$1"
+shift
+
+case "$MODE" in
+ --create)
+ echo "Mode: Create"
+ echo "Performing create action..."
+
+ create_topics
+ ;;
+
+ --check)
+ echo "Mode: Check"
+ echo "Performing check action..."
+ check_all_kafka_topics_exist
+ ;;
+
+ *)
+ echo "Error: Invalid option '$MODE'." >&2
+ usage
+ ;;
+esac
diff --git a/src/roles/iop_kafka/files/kafka/init-start.sh b/src/roles/iop_kafka/files/kafka/init-start.sh
new file mode 100644
index 000000000..69e1bcd7f
--- /dev/null
+++ b/src/roles/iop_kafka/files/kafka/init-start.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Generate cluster UUID if it doesn't exist
+CLUSTER_ID_FILE="/var/lib/kafka/data/meta.properties"
+if [ ! -f "$CLUSTER_ID_FILE" ]; then
+ echo "Initializing KRaft storage..."
+ # Generate a cluster UUID
+ CLUSTER_UUID=$(bin/kafka-storage.sh random-uuid)
+ echo "Generated cluster UUID: $CLUSTER_UUID"
+
+ # Format the storage directory
+ bin/kafka-storage.sh format -t "$CLUSTER_UUID" -c /opt/kafka/config/kraft/server.properties
+else
+ echo "KRaft storage already initialized"
+fi
+
+# Start Kafka server
+echo "Starting Kafka server..."
+exec bin/kafka-server-start.sh /opt/kafka/config/kraft/server.properties
diff --git a/src/roles/iop_kafka/handlers/main.yaml b/src/roles/iop_kafka/handlers/main.yaml
new file mode 100644
index 000000000..a900c7452
--- /dev/null
+++ b/src/roles/iop_kafka/handlers/main.yaml
@@ -0,0 +1,7 @@
+---
+- name: Restart kafka
+ ansible.builtin.systemd:
+ name: iop-core-kafka
+ state: restarted
+ listen: restart kafka
+ when: ansible_facts.services['iop-core-kafka.service'] is defined
diff --git a/src/roles/iop_kafka/tasks/main.yaml b/src/roles/iop_kafka/tasks/main.yaml
new file mode 100644
index 000000000..cf58cdfce
--- /dev/null
+++ b/src/roles/iop_kafka/tasks/main.yaml
@@ -0,0 +1,74 @@
+---
+- name: Pull Kafka container image
+ containers.podman.podman_image:
+ name: "{{ iop_kafka_container_image }}:{{ iop_kafka_container_tag }}"
+ state: present
+
+- name: Create Kafka init script secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-kafka-init-start
+ data: "{{ lookup('ansible.builtin.file', 'kafka/init-start.sh') }}"
+ notify: restart kafka
+
+- name: Create Kafka server properties secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-kafka-server-properties
+ data: "{{ lookup('ansible.builtin.template', 'kafka/kraft.j2') }}"
+ notify: restart kafka
+
+- name: Create Kafka init topics script secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-core-kafka-init
+ data: "{{ lookup('ansible.builtin.file', 'kafka/init') }}"
+
+- name: Create Kafka data volume
+ containers.podman.podman_volume:
+ name: iop-core-kafka-data
+ state: present
+
+- name: Deploy Kafka container
+ containers.podman.podman_container:
+ name: iop-core-kafka
+ image: "{{ iop_kafka_container_image }}:{{ iop_kafka_container_tag }}"
+ state: quadlet
+ command: sh bin/init-start.sh
+ network:
+ - iop-core-network
+ env:
+ LOG_DIR: /tmp/kafka-logs
+ KAFKA_NODE_ID: "1"
+ volumes:
+ - "iop-core-kafka-data:/var/lib/kafka/data:rw"
+ secrets:
+ - 'iop-core-kafka-init-start,target=/opt/kafka/bin/init-start.sh,mode=0755,type=mount'
+ - 'iop-core-kafka-server-properties,target=/opt/kafka/config/kraft/server.properties,mode=0644,type=mount'
+ - 'iop-core-kafka-init,target=/opt/kafka/init.sh,mode=0755,type=mount'
+ quadlet_options:
+ - |
+ [Unit]
+ Description=IOP Core Kafka Container
+ [Service]
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Kafka service
+ ansible.builtin.systemd:
+ name: iop-core-kafka
+ enabled: true
+ state: started
+
+- name: Initialize Kafka topics
+ containers.podman.podman_container_exec:
+ name: iop-core-kafka
+ command: /opt/kafka/init.sh --create
+ register: iop_kafka_topics_result
+ changed_when: "'Creating topic' in iop_kafka_topics_result.stdout"
+ failed_when: iop_kafka_topics_result.rc != 0 and 'already exists' not in iop_kafka_topics_result.stderr
diff --git a/src/roles/iop_kafka/templates/kafka/kraft.j2 b/src/roles/iop_kafka/templates/kafka/kraft.j2
new file mode 100644
index 000000000..96b9041be
--- /dev/null
+++ b/src/roles/iop_kafka/templates/kafka/kraft.j2
@@ -0,0 +1,123 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This file contains a minimal set of configs to get a single-node Kafka cluster
+# up and running in KRaft mode. This file was automatically generated.
+#
+
+############################# Server Basics #############################
+
+# The role of this server. Setting this puts us in KRaft mode
+process.roles=broker,controller
+
+# The node id associated with this instance's roles
+node.id=1
+
+# The connect string for the controller quorum
+controller.quorum.voters=1@iop-core-kafka:9093
+
+############################# Socket Server Settings #############################
+
+# The address the socket server listens on. If not configured, the host name will be equal to the value of
+# java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092.
+# FORMAT:
+# listeners = listener_name://host_name:port
+# EXAMPLE:
+# listeners = PLAINTEXT://your.host.name:9092
+listeners=PLAINTEXT://iop-core-kafka:9092,CONTROLLER://iop-core-kafka:9093
+
+# Name of listener used for communication between brokers.
+inter.broker.listener.name=PLAINTEXT
+
+# Listener name, hostname and port the broker will advertise to clients.
+# If not set, it uses the value for "listeners".
+advertised.listeners=PLAINTEXT://iop-core-kafka:9092
+
+# A comma-separated list of the names of the listeners used by the controller.
+# If no explicit mapping set, the default will be using PLAINTEXT protocol
+# This is required if running in KRaft mode.
+controller.listener.names=CONTROLLER
+
+# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
+listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT
+
+############################# Log Basics #############################
+
+# A comma separated list of directories under which to store log files
+log.dirs=/var/lib/kafka/data
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
+# This value is recommended to be increased for installations with data dirs located in RAID array.
+num.recovery.threads.per.data.dir=1
+
+############################# Internal Topic Settings #############################
+# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
+# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
+offsets.topic.replication.factor=1
+transaction.state.log.replication.factor=1
+transaction.state.log.min.isr=1
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+# 1. Durability: Unflushed data may be lost if you are not using replication.
+# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion due to age
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are removed when they grow larger
+# than this size. -1 disables size-based retention.
+log.segment.bytes=1073741824
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.retention.bytes=-1
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=300000
+
+############################# Group Coordinator Settings #############################
+
+# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
+# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
+# The default value for this is 3 seconds.
+# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
+# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
+group.initial.rebalance.delay.ms=0
diff --git a/src/roles/iop_network/defaults/main.yaml b/src/roles/iop_network/defaults/main.yaml
new file mode 100644
index 000000000..c4d62c427
--- /dev/null
+++ b/src/roles/iop_network/defaults/main.yaml
@@ -0,0 +1,5 @@
+---
+iop_network_name: "iop-core-network"
+iop_network_subnet: "10.130.0.0/24"
+iop_network_gateway: "10.130.0.1"
+iop_network_driver: "bridge"
diff --git a/src/roles/iop_network/tasks/main.yaml b/src/roles/iop_network/tasks/main.yaml
new file mode 100644
index 000000000..3f6676ec2
--- /dev/null
+++ b/src/roles/iop_network/tasks/main.yaml
@@ -0,0 +1,8 @@
+---
+- name: Create IOP Core network
+ containers.podman.podman_network:
+ name: "{{ iop_network_name }}"
+ state: present
+ driver: "{{ iop_network_driver }}"
+ subnet: "{{ iop_network_subnet }}"
+ gateway: "{{ iop_network_gateway }}"
diff --git a/src/roles/iop_puptoo/defaults/main.yaml b/src/roles/iop_puptoo/defaults/main.yaml
new file mode 100644
index 000000000..3a9a12908
--- /dev/null
+++ b/src/roles/iop_puptoo/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+iop_puptoo_container_image: "quay.io/iop/puptoo"
+iop_puptoo_container_tag: "foreman-3.16"
diff --git a/src/roles/iop_puptoo/handlers/main.yaml b/src/roles/iop_puptoo/handlers/main.yaml
new file mode 100644
index 000000000..e3094888a
--- /dev/null
+++ b/src/roles/iop_puptoo/handlers/main.yaml
@@ -0,0 +1,6 @@
+---
+- name: Restart puptoo
+ ansible.builtin.systemd:
+ name: iop-core-puptoo
+ state: restarted
+ when: ansible_facts.services['iop-core-puptoo.service'] is defined
diff --git a/src/roles/iop_puptoo/tasks/main.yaml b/src/roles/iop_puptoo/tasks/main.yaml
new file mode 100644
index 000000000..792b11c45
--- /dev/null
+++ b/src/roles/iop_puptoo/tasks/main.yaml
@@ -0,0 +1,37 @@
+---
+- name: Pull Puptoo container image
+ containers.podman.podman_image:
+ name: "{{ iop_puptoo_container_image }}:{{ iop_puptoo_container_tag }}"
+ state: present
+
+- name: Deploy Puptoo container
+ containers.podman.podman_container:
+ name: iop-core-puptoo
+ image: "{{ iop_puptoo_container_image }}:{{ iop_puptoo_container_tag }}"
+ state: quadlet
+ env:
+ BOOTSTRAP_SERVERS: "iop-core-kafka:9092"
+ DISABLE_REDIS: "True"
+ DISABLE_S3_UPLOAD: "True"
+ network:
+ - iop-core-network
+ quadlet_options:
+ - |
+ [Unit]
+ Description=IOP Core Puptoo Container
+ After=iop-core-kafka.service
+ Wants=iop-core-kafka.service
+ [Service]
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Puptoo service
+ ansible.builtin.systemd:
+ name: iop-core-puptoo
+ enabled: true
+ state: started
diff --git a/src/roles/iop_remediation/defaults/main.yaml b/src/roles/iop_remediation/defaults/main.yaml
new file mode 100644
index 000000000..48cf75699
--- /dev/null
+++ b/src/roles/iop_remediation/defaults/main.yaml
@@ -0,0 +1,9 @@
+---
+iop_remediation_container_image: "quay.io/iop/remediations"
+iop_remediation_container_tag: "foreman-3.16"
+
+iop_remediation_database_name: remediations_db
+iop_remediation_database_user: remediations_user
+iop_remediation_database_password: CHANGEME
+iop_remediation_database_host: "host.containers.internal"
+iop_remediation_database_port: "5432"
diff --git a/src/roles/iop_remediation/handlers/main.yaml b/src/roles/iop_remediation/handlers/main.yaml
new file mode 100644
index 000000000..e49666e08
--- /dev/null
+++ b/src/roles/iop_remediation/handlers/main.yaml
@@ -0,0 +1,14 @@
+---
+- name: Check if remediation api service exists
+ ansible.builtin.systemd:
+ name: iop-service-remediations-api
+ register: iop_remediation_api_service_status
+ failed_when: false
+ listen: restart remediation
+
+- name: Restart remediation api
+ ansible.builtin.systemd:
+ name: iop-service-remediations-api
+ state: restarted
+ when: iop_remediation_api_service_status.status is defined and iop_remediation_api_service_status.status.LoadState != "not-found"
+ listen: restart remediation
diff --git a/src/roles/iop_remediation/tasks/main.yaml b/src/roles/iop_remediation/tasks/main.yaml
new file mode 100644
index 000000000..0abbda891
--- /dev/null
+++ b/src/roles/iop_remediation/tasks/main.yaml
@@ -0,0 +1,82 @@
+---
+- name: Pull Remediation container image
+ containers.podman.podman_image:
+ name: "{{ iop_remediation_container_image }}:{{ iop_remediation_container_tag }}"
+ state: present
+
+- name: Create Remediation database username secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-service-remediations-db-username
+ data: "{{ iop_remediation_database_user }}"
+ notify: restart remediation
+
+- name: Create Remediation database password secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-service-remediations-db-password
+ data: "{{ iop_remediation_database_password }}"
+ notify: restart remediation
+
+- name: Create Remediation database name secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-service-remediations-db-name
+ data: "{{ iop_remediation_database_name }}"
+ notify: restart remediation
+
+- name: Create Remediation database host secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-service-remediations-db-host
+ data: "{{ iop_remediation_database_host }}"
+ notify: restart remediation
+
+- name: Create Remediation database port secret
+ containers.podman.podman_secret:
+ state: present
+ name: iop-service-remediations-db-port
+ data: "{{ iop_remediation_database_port }}"
+ notify: restart remediation
+
+- name: Deploy Remediation API container
+ containers.podman.podman_container:
+ name: iop-service-remediations-api
+ image: "{{ iop_remediation_container_image }}:{{ iop_remediation_container_tag }}"
+ state: quadlet
+ network: host
+ command: sh -c "npm run db:migrate && exec node --max-http-header-size=16384 src/app.js"
+ env:
+ REDIS_ENABLED: "false"
+ RBAC_ENFORCE: "false"
+ CONTENT_SERVER_HOST: "http://iop-service-advisor-backend-api:8000"
+ ADVISOR_HOST: "http://iop-service-advisor-backend-api:8000"
+ INVENTORY_HOST: "http://iop-core-host-inventory-api:8081"
+ DB_SSL_ENABLED: "false"
+ REGISTRY_AUTH_FILE: "/etc/foreman/registry-auth.json"
+ secrets:
+ - 'iop-service-remediations-db-username,type=env,target=DB_USERNAME'
+ - 'iop-service-remediations-db-password,type=env,target=DB_PASSWORD'
+ - 'iop-service-remediations-db-name,type=env,target=DB_DATABASE'
+ - 'iop-service-remediations-db-host,type=env,target=DB_HOST'
+ - 'iop-service-remediations-db-port,type=env,target=DB_PORT'
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Remediations API
+ Wants=iop-core-host-inventory-api.service iop-service-advisor-backend-api.service
+ After=iop-core-host-inventory-api.service iop-service-advisor-backend-api.service
+ [Service]
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Remediation API service
+ ansible.builtin.systemd:
+ name: iop-service-remediations-api
+ enabled: true
+ state: started
diff --git a/src/roles/iop_vmaas/defaults/main.yaml b/src/roles/iop_vmaas/defaults/main.yaml
new file mode 100644
index 000000000..7fe67d4d3
--- /dev/null
+++ b/src/roles/iop_vmaas/defaults/main.yaml
@@ -0,0 +1,9 @@
+---
+iop_vmaas_container_image: "quay.io/iop/vmaas"
+iop_vmaas_container_tag: "foreman-3.16"
+
+iop_vmaas_database_name: vmaas_db
+iop_vmaas_database_user: vmaas_admin
+iop_vmaas_database_password: CHANGEME
+iop_vmaas_database_host: "host.containers.internal"
+iop_vmaas_database_port: "5432"
diff --git a/src/roles/iop_vmaas/handlers/main.yaml b/src/roles/iop_vmaas/handlers/main.yaml
new file mode 100644
index 000000000..195854cdb
--- /dev/null
+++ b/src/roles/iop_vmaas/handlers/main.yaml
@@ -0,0 +1,28 @@
+---
+- name: Check if vmaas reposcan service exists
+ ansible.builtin.systemd:
+ name: iop-service-vmaas-reposcan
+ register: iop_vmaas_reposcan_service_status
+ failed_when: false
+ listen: restart vmaas
+
+- name: Restart vmaas reposcan
+ ansible.builtin.systemd:
+ name: iop-service-vmaas-reposcan
+ state: restarted
+ when: iop_vmaas_reposcan_service_status.status is defined and iop_vmaas_reposcan_service_status.status.LoadState != "not-found"
+ listen: restart vmaas
+
+- name: Check if vmaas webapp-go service exists
+ ansible.builtin.systemd:
+ name: iop-service-vmaas-webapp-go
+ register: iop_vmaas_webapp_service_status
+ failed_when: false
+ listen: restart vmaas
+
+- name: Restart vmaas webapp-go
+ ansible.builtin.systemd:
+ name: iop-service-vmaas-webapp-go
+ state: restarted
+ when: iop_vmaas_webapp_service_status.status is defined and iop_vmaas_webapp_service_status.status.LoadState != "not-found"
+ listen: restart vmaas
diff --git a/src/roles/iop_vmaas/tasks/main.yaml b/src/roles/iop_vmaas/tasks/main.yaml
new file mode 100644
index 000000000..1ceff0330
--- /dev/null
+++ b/src/roles/iop_vmaas/tasks/main.yaml
@@ -0,0 +1,111 @@
+---
+# Create/refresh the database connection secrets consumed by both VMAAS
+# containers. Changing any of these must restart the services, so notify
+# the role's "restart vmaas" handlers (otherwise they are dead code).
+- name: Create VMAAS database secrets
+  containers.podman.podman_secret:
+    name: "{{ item.name }}"
+    data: "{{ item.data }}"
+    state: present
+  loop:
+    - name: "iop-service-vmaas-reposcan-database-username"
+      data: "{{ iop_vmaas_database_user }}"
+    - name: "iop-service-vmaas-reposcan-database-password"
+      data: "{{ iop_vmaas_database_password }}"
+    - name: "iop-service-vmaas-reposcan-database-name"
+      data: "{{ iop_vmaas_database_name }}"
+    - name: "iop-service-vmaas-reposcan-database-host"
+      data: "{{ iop_vmaas_database_host }}"
+    - name: "iop-service-vmaas-reposcan-database-port"
+      data: "{{ iop_vmaas_database_port }}"
+  no_log: true
+  notify: restart vmaas
+
+- name: Create VMAAS data volume
+ containers.podman.podman_volume:
+ name: iop-service-vmaas-data
+ state: present
+
+- name: Deploy VMAAS Reposcan container
+ containers.podman.podman_container:
+ name: iop-service-vmaas-reposcan
+ image: "{{ iop_vmaas_container_image }}:{{ iop_vmaas_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ volumes:
+ - iop-service-vmaas-data:/data:rw
+ command: "/vmaas/entrypoint.sh database-upgrade reposcan"
+ env:
+ PROMETHEUS_PORT: "8085"
+ PROMETHEUS_MULTIPROC_DIR: "/tmp/prometheus_multiproc_dir"
+ SYNC_REPO_LIST_SOURCE: "katello"
+ SYNC_REPOS: "yes"
+ SYNC_CVE_MAP: "yes"
+ SYNC_CPE: "no"
+ SYNC_CSAF: "no"
+ SYNC_RELEASES: "no"
+ SYNC_RELEASE_GRAPH: "no"
+ KATELLO_URL: "http://iop-core-gateway:9090"
+ REDHAT_CVEMAP_URL: "http://iop-core-gateway:9090/pub/iop/data/meta/v1/cvemap.xml"
+ POSTGRESQL_SSL_MODE: "disable"
+ secrets:
+ - "iop-service-vmaas-reposcan-database-username,type=env,target=POSTGRESQL_USER"
+ - "iop-service-vmaas-reposcan-database-password,type=env,target=POSTGRESQL_PASSWORD"
+ - "iop-service-vmaas-reposcan-database-name,type=env,target=POSTGRESQL_DATABASE"
+ - "iop-service-vmaas-reposcan-database-host,type=env,target=POSTGRESQL_HOST"
+ - "iop-service-vmaas-reposcan-database-port,type=env,target=POSTGRESQL_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=VMAAS Reposcan Service
+ [Service]
+ Restart=on-failure
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ [Install]
+ WantedBy=default.target
+
+- name: Deploy VMAAS Webapp-Go container
+ containers.podman.podman_container:
+ name: iop-service-vmaas-webapp-go
+ image: "{{ iop_vmaas_container_image }}:{{ iop_vmaas_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ command: "/vmaas/entrypoint.sh webapp-go"
+ env:
+ REPOSCAN_PUBLIC_URL: "http://iop-service-vmaas-reposcan:8000"
+ REPOSCAN_PRIVATE_URL: "http://iop-service-vmaas-reposcan:10000"
+ CSAF_UNFIXED_EVAL_ENABLED: "FALSE"
+ GIN_MODE: "release"
+ POSTGRESQL_SSL_MODE: "disable"
+ secrets:
+ - "iop-service-vmaas-reposcan-database-username,type=env,target=POSTGRESQL_USER"
+ - "iop-service-vmaas-reposcan-database-password,type=env,target=POSTGRESQL_PASSWORD"
+ - "iop-service-vmaas-reposcan-database-name,type=env,target=POSTGRESQL_DATABASE"
+ - "iop-service-vmaas-reposcan-database-host,type=env,target=POSTGRESQL_HOST"
+ - "iop-service-vmaas-reposcan-database-port,type=env,target=POSTGRESQL_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=VMAAS Webapp-Go Service
+ Wants=iop-service-vmaas-reposcan.service
+ After=iop-service-vmaas-reposcan.service
+ [Service]
+ Restart=on-failure
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ [Install]
+ WantedBy=default.target
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start VMAAS Reposcan service
+ ansible.builtin.systemd:
+ name: iop-service-vmaas-reposcan
+ enabled: true
+ state: started
+
+- name: Start VMAAS Webapp-Go service
+ ansible.builtin.systemd:
+ name: iop-service-vmaas-webapp-go
+ enabled: true
+ state: started
diff --git a/src/roles/iop_vulnerability/defaults/main.yaml b/src/roles/iop_vulnerability/defaults/main.yaml
new file mode 100644
index 000000000..03d1fbce0
--- /dev/null
+++ b/src/roles/iop_vulnerability/defaults/main.yaml
@@ -0,0 +1,13 @@
+---
+iop_vulnerability_container_image: "quay.io/iop/vulnerability-engine"
+iop_vulnerability_container_tag: "foreman-3.16"
+
+iop_vulnerability_database_name: vulnerability_db
+iop_vulnerability_database_user: vulnerability_admin
+iop_vulnerability_database_password: CHANGEME
+iop_vulnerability_database_host: "host.containers.internal"
+iop_vulnerability_database_port: "5432"
+
+# Taskomatic configuration
+iop_vulnerability_taskomatic_jobs: "stale_systems:5,delete_systems:30,cacheman:5"
+iop_vulnerability_taskomatic_startup: "cacheman"
diff --git a/src/roles/iop_vulnerability/handlers/main.yaml b/src/roles/iop_vulnerability/handlers/main.yaml
new file mode 100644
index 000000000..c2a665c1a
--- /dev/null
+++ b/src/roles/iop_vulnerability/handlers/main.yaml
@@ -0,0 +1,24 @@
+---
+- name: Check if vulnerability services exist
+ ansible.builtin.systemd:
+ name: "{{ item }}"
+ register: iop_vulnerability_services_status
+ failed_when: false
+ loop:
+ - iop-service-vuln-dbupgrade
+ - iop-service-vuln-manager
+ - iop-service-vuln-taskomatic
+ - iop-service-vuln-grouper
+ - iop-service-vuln-listener
+ - iop-service-vuln-evaluator-recalc
+ - iop-service-vuln-evaluator-upload
+ - iop-service-vuln-vmaas-sync
+ listen: restart vulnerability
+
+- name: Restart vulnerability services
+ ansible.builtin.systemd:
+ name: "{{ item.item }}"
+ state: restarted
+ when: item.status is defined and item.status.LoadState != "not-found"
+ loop: "{{ iop_vulnerability_services_status.results }}"
+ listen: restart vulnerability
diff --git a/src/roles/iop_vulnerability/tasks/main.yaml b/src/roles/iop_vulnerability/tasks/main.yaml
new file mode 100644
index 000000000..51a6fbbd8
--- /dev/null
+++ b/src/roles/iop_vulnerability/tasks/main.yaml
@@ -0,0 +1,379 @@
+---
+- name: Create vulnerability database secrets
+ containers.podman.podman_secret:
+ name: "{{ item.name }}"
+ data: "{{ item.data }}"
+ state: present
+ loop:
+ - name: "iop-service-vulnerability-database-username"
+ data: "{{ iop_vulnerability_database_user }}"
+ - name: "iop-service-vulnerability-database-password"
+ data: "{{ iop_vulnerability_database_password }}"
+ - name: "iop-service-vulnerability-database-name"
+ data: "{{ iop_vulnerability_database_name }}"
+ - name: "iop-service-vulnerability-database-host"
+ data: "{{ iop_vulnerability_database_host }}"
+ - name: "iop-service-vulnerability-database-port"
+ data: "{{ iop_vulnerability_database_port }}"
+ no_log: true
+
+- name: Set up Foreign Data Wrapper for vulnerability database
+ ansible.builtin.include_role:
+ name: iop_fdw
+ vars:
+ iop_fdw_database_name: "{{ iop_vulnerability_database_name }}"
+ iop_fdw_database_user: "{{ iop_vulnerability_database_user }}"
+ iop_fdw_database_password: "{{ iop_vulnerability_database_password }}"
+ iop_fdw_remote_database_name: "{{ iop_inventory_database_name }}"
+ iop_fdw_remote_user: "{{ iop_inventory_database_user }}"
+ iop_fdw_remote_password: "{{ iop_inventory_database_password }}"
+
+# 1. Database upgrade init container (oneshot)
+- name: Deploy Vulnerability Database Upgrade container
+ containers.podman.podman_container:
+ name: iop-service-vuln-dbupgrade
+ image: "{{ iop_vulnerability_container_image }}:{{ iop_vulnerability_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ command: "bash -c /engine/dbupgrade.sh"
+ env:
+ UNLEASH_BOOTSTRAP_FILE: "develfeatureflags.json"
+ DISABLE_RBAC: "TRUE"
+ POSTGRES_SSL_MODE: "disable"
+ secrets:
+ - "iop-service-vulnerability-database-username,type=env,target=POSTGRES_USER"
+ - "iop-service-vulnerability-database-password,type=env,target=POSTGRES_PASSWORD"
+ - "iop-service-vulnerability-database-name,type=env,target=POSTGRES_DB"
+ - "iop-service-vulnerability-database-host,type=env,target=POSTGRES_HOST"
+ - "iop-service-vulnerability-database-port,type=env,target=POSTGRES_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Vulnerability Database Upgrade Init Container
+ [Service]
+ Type=oneshot
+ RemainAfterExit=true
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ [Install]
+ WantedBy=default.target
+ notify: restart vulnerability
+
+# 2. Manager service (main service)
+- name: Deploy Vulnerability Manager container
+ containers.podman.podman_container:
+ name: iop-service-vuln-manager
+ image: "{{ iop_vulnerability_container_image }}:{{ iop_vulnerability_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ command: "/engine/entrypoint.sh manager"
+ env:
+ UNLEASH_BOOTSTRAP_FILE: "develfeatureflags.json"
+ DISABLE_RBAC: "TRUE"
+ POSTGRES_SSL_MODE: "disable"
+ secrets:
+ - "iop-service-vulnerability-database-username,type=env,target=POSTGRES_USER"
+ - "iop-service-vulnerability-database-password,type=env,target=POSTGRES_PASSWORD"
+ - "iop-service-vulnerability-database-name,type=env,target=POSTGRES_DB"
+ - "iop-service-vulnerability-database-host,type=env,target=POSTGRES_HOST"
+ - "iop-service-vulnerability-database-port,type=env,target=POSTGRES_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Vulnerability Manager Service
+ After=network-online.target iop-service-vuln-dbupgrade.service
+ Requires=iop-service-vuln-dbupgrade.service
+ [Service]
+ Restart=on-failure
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ [Install]
+ WantedBy=default.target
+ notify: restart vulnerability
+
+# 3. Taskomatic service (task scheduler)
+- name: Deploy Vulnerability Taskomatic container
+ containers.podman.podman_container:
+ name: iop-service-vuln-taskomatic
+ image: "{{ iop_vulnerability_container_image }}:{{ iop_vulnerability_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ command: "/engine/entrypoint.sh taskomatic"
+ env:
+ UNLEASH_BOOTSTRAP_FILE: "develfeatureflags.json"
+ IS_FEDRAMP: "true"
+ JOBS: "{{ iop_vulnerability_taskomatic_jobs }}"
+ JOBS_STARTUP: "{{ iop_vulnerability_taskomatic_startup }}"
+ POSTGRES_SSL_MODE: "disable"
+ secrets:
+ - "iop-service-vulnerability-database-username,type=env,target=POSTGRES_USER"
+ - "iop-service-vulnerability-database-password,type=env,target=POSTGRES_PASSWORD"
+ - "iop-service-vulnerability-database-name,type=env,target=POSTGRES_DB"
+ - "iop-service-vulnerability-database-host,type=env,target=POSTGRES_HOST"
+ - "iop-service-vulnerability-database-port,type=env,target=POSTGRES_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Vulnerability Taskomatic Service
+ Wants=iop-service-vuln-manager.service
+ After=iop-service-vuln-manager.service
+ [Service]
+ Restart=on-failure
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ [Install]
+ WantedBy=default.target
+ notify: restart vulnerability
+
+# 4. Grouper service
+- name: Deploy Vulnerability Grouper container
+ containers.podman.podman_container:
+ name: iop-service-vuln-grouper
+ image: "{{ iop_vulnerability_container_image }}:{{ iop_vulnerability_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ command: "/engine/entrypoint.sh grouper"
+ env:
+ UNLEASH_BOOTSTRAP_FILE: "develfeatureflags.json"
+ KAFKA_HOST: "iop-core-kafka"
+ KAFKA_PORT: "9092"
+ KAFKA_GROUP_ID: "vulnerability-grouper"
+ PAYLOAD_TRACKER_TOPIC: "platform.payload-status"
+ GROUPER_INVENTORY_TOPIC: "vulnerability.grouper.inventory.upload"
+ GROUPER_ADVISOR_TOPIC: "vulnerability.grouper.advisor.upload"
+ PROMETHEUS_PORT: "8085"
+ POSTGRES_SSL_MODE: "disable"
+ secrets:
+ - "iop-service-vulnerability-database-username,type=env,target=POSTGRES_USER"
+ - "iop-service-vulnerability-database-password,type=env,target=POSTGRES_PASSWORD"
+ - "iop-service-vulnerability-database-name,type=env,target=POSTGRES_DB"
+ - "iop-service-vulnerability-database-host,type=env,target=POSTGRES_HOST"
+ - "iop-service-vulnerability-database-port,type=env,target=POSTGRES_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Vulnerability Grouper Service
+ Wants=iop-service-vuln-manager.service
+ After=iop-service-vuln-manager.service
+ [Service]
+ Restart=on-failure
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ [Install]
+ WantedBy=default.target
+ notify: restart vulnerability
+
+# 5. Listener service (event listener)
+- name: Deploy Vulnerability Listener container
+ containers.podman.podman_container:
+ name: iop-service-vuln-listener
+ image: "{{ iop_vulnerability_container_image }}:{{ iop_vulnerability_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ command: "/engine/entrypoint.sh listener"
+ env:
+ UNLEASH_BOOTSTRAP_FILE: "develfeatureflags.json"
+ KAFKA_HOST: "iop-core-kafka"
+ KAFKA_PORT: "9092"
+ KAFKA_GROUP_ID: "vulnerability-listener2"
+ EVENTS_TOPIC: "platform.inventory.events"
+ PAYLOAD_TRACKER_TOPIC: "platform.payload-status"
+ ADVISOR_RESULTS_TOPIC: "platform.engine.results"
+ MESSAGE_TOPIC: "vulnerability.evaluator.upload"
+ ALLOWED_REPORTERS: "puptoo,satellite"
+ secrets:
+ - "iop-service-vulnerability-database-username,type=env,target=POSTGRES_USER"
+ - "iop-service-vulnerability-database-password,type=env,target=POSTGRES_PASSWORD"
+ - "iop-service-vulnerability-database-name,type=env,target=POSTGRES_DB"
+ - "iop-service-vulnerability-database-host,type=env,target=POSTGRES_HOST"
+ - "iop-service-vulnerability-database-port,type=env,target=POSTGRES_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Vulnerability Listener Service
+ Wants=iop-service-vuln-manager.service
+ After=iop-service-vuln-manager.service
+ [Service]
+ Restart=on-failure
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ [Install]
+ WantedBy=default.target
+ notify: restart vulnerability
+
+# 6. Evaluator (Recalc) service
+- name: Deploy Vulnerability Evaluator (Recalc) container
+ containers.podman.podman_container:
+ name: iop-service-vuln-evaluator-recalc
+ image: "{{ iop_vulnerability_container_image }}:{{ iop_vulnerability_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ command: "/engine/entrypoint.sh evaluator"
+ env:
+ UNLEASH_BOOTSTRAP_FILE: "develfeatureflags.json"
+ KAFKA_HOST: "iop-core-kafka"
+ KAFKA_PORT: "9092"
+ KAFKA_GROUP_ID: "vulnerability"
+ PAYLOAD_TRACKER_TOPIC: "platform.payload-status"
+ REMEDIATION_UPDATES_TOPIC: "platform.remediation-updates.vulnerability"
+ EVALUATOR_RESULTS_TOPIC: "vulnerability.evaluator.results"
+ EVALUATOR_TOPIC: "vulnerability.evaluator.recalc"
+ VMAAS_HOST: "http://iop-service-vmaas-webapp-go:8000"
+ secrets:
+ - "iop-service-vulnerability-database-username,type=env,target=POSTGRES_USER"
+ - "iop-service-vulnerability-database-password,type=env,target=POSTGRES_PASSWORD"
+ - "iop-service-vulnerability-database-name,type=env,target=POSTGRES_DB"
+ - "iop-service-vulnerability-database-host,type=env,target=POSTGRES_HOST"
+ - "iop-service-vulnerability-database-port,type=env,target=POSTGRES_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Vulnerability Evaluator (Recalc) Service
+ Wants=iop-service-vuln-manager.service
+ After=iop-service-vuln-manager.service
+ [Service]
+ Restart=on-failure
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ [Install]
+ WantedBy=default.target
+ notify: restart vulnerability
+
+# 7. Evaluator (Upload) service
+- name: Deploy Vulnerability Evaluator (Upload) container
+ containers.podman.podman_container:
+ name: iop-service-vuln-evaluator-upload
+ image: "{{ iop_vulnerability_container_image }}:{{ iop_vulnerability_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ command: "/engine/entrypoint.sh evaluator"
+ env:
+ UNLEASH_BOOTSTRAP_FILE: "develfeatureflags.json"
+ KAFKA_HOST: "iop-core-kafka"
+ KAFKA_PORT: "9092"
+ KAFKA_GROUP_ID: "vulnerability"
+ PAYLOAD_TRACKER_TOPIC: "platform.payload-status"
+ REMEDIATION_UPDATES_TOPIC: "platform.remediation-updates.vulnerability"
+ EVALUATOR_RESULTS_TOPIC: "vulnerability.evaluator.results"
+ EVALUATOR_TOPIC: "vulnerability.evaluator.upload"
+ VMAAS_HOST: "http://iop-service-vmaas-webapp-go:8000"
+ secrets:
+ - "iop-service-vulnerability-database-username,type=env,target=POSTGRES_USER"
+ - "iop-service-vulnerability-database-password,type=env,target=POSTGRES_PASSWORD"
+ - "iop-service-vulnerability-database-name,type=env,target=POSTGRES_DB"
+ - "iop-service-vulnerability-database-host,type=env,target=POSTGRES_HOST"
+ - "iop-service-vulnerability-database-port,type=env,target=POSTGRES_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Vulnerability Evaluator (Upload) Service
+ Wants=iop-service-vuln-grouper.service iop-service-vuln-manager.service
+ After=iop-service-vuln-grouper.service iop-service-vuln-manager.service
+ [Service]
+ Restart=on-failure
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ [Install]
+ WantedBy=default.target
+ notify: restart vulnerability
+
+# 8. VMAAS Sync service (oneshot with timer)
+- name: Deploy Vulnerability VMAAS Sync container
+ containers.podman.podman_container:
+ name: iop-service-vuln-vmaas-sync
+ image: "{{ iop_vulnerability_container_image }}:{{ iop_vulnerability_container_tag }}"
+ state: quadlet
+ quadlet_dir: /etc/containers/systemd
+ network: iop-core-network
+ command: "/engine/entrypoint.sh vmaas-sync"
+ env:
+ UNLEASH_BOOTSTRAP_FILE: "develfeatureflags.json"
+ KAFKA_HOST: "iop-core-kafka"
+ KAFKA_PORT: "9092"
+ KAFKA_GROUP_ID: "vulnerability"
+ MESSAGE_TOPIC: "vulnerability.evaluator.recalc"
+ VMAAS_HOST: "http://iop-service-vmaas-webapp-go:8000"
+ secrets:
+ - "iop-service-vulnerability-database-username,type=env,target=POSTGRES_USER"
+ - "iop-service-vulnerability-database-password,type=env,target=POSTGRES_PASSWORD"
+ - "iop-service-vulnerability-database-name,type=env,target=POSTGRES_DB"
+ - "iop-service-vulnerability-database-host,type=env,target=POSTGRES_HOST"
+ - "iop-service-vulnerability-database-port,type=env,target=POSTGRES_PORT"
+ quadlet_options:
+ - |
+ [Unit]
+ Description=Vulnerability VMAAS Sync Job
+ Wants=iop-service-vmaas-webapp-go.service iop-service-vuln-manager.service
+ After=iop-service-vmaas-webapp-go.service iop-service-vuln-manager.service
+ [Service]
+ Type=oneshot
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ notify: restart vulnerability
+
+- name: Create VMAAS Sync systemd timer
+ ansible.builtin.copy:
+ dest: /etc/systemd/system/iop-service-vuln-vmaas-sync.timer
+ content: |
+ [Unit]
+ Description=Vulnerability VMAAS Sync Timer
+
+ [Timer]
+ OnCalendar=daily
+ RandomizedDelaySec=1h
+ Persistent=true
+
+ [Install]
+ WantedBy=timers.target
+ mode: '0644'
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Vulnerability Database Upgrade service
+ ansible.builtin.systemd:
+ name: iop-service-vuln-dbupgrade
+ enabled: true
+ state: started
+
+- name: Start Vulnerability Manager service
+ ansible.builtin.systemd:
+ name: iop-service-vuln-manager
+ enabled: true
+ state: started
+
+- name: Start Vulnerability Taskomatic service
+ ansible.builtin.systemd:
+ name: iop-service-vuln-taskomatic
+ enabled: true
+ state: started
+
+- name: Start Vulnerability Grouper service
+ ansible.builtin.systemd:
+ name: iop-service-vuln-grouper
+ enabled: true
+ state: started
+
+- name: Start Vulnerability Listener service
+ ansible.builtin.systemd:
+ name: iop-service-vuln-listener
+ enabled: true
+ state: started
+
+- name: Start Vulnerability Evaluator (Recalc) service
+ ansible.builtin.systemd:
+ name: iop-service-vuln-evaluator-recalc
+ enabled: true
+ state: started
+
+- name: Start Vulnerability Evaluator (Upload) service
+ ansible.builtin.systemd:
+ name: iop-service-vuln-evaluator-upload
+ enabled: true
+ state: started
+
+- name: Enable VMAAS Sync timer
+ ansible.builtin.systemd:
+ name: iop-service-vuln-vmaas-sync.timer
+ enabled: true
+ state: started
diff --git a/src/roles/iop_vulnerability_frontend/defaults/main.yaml b/src/roles/iop_vulnerability_frontend/defaults/main.yaml
new file mode 100644
index 000000000..6bae1f548
--- /dev/null
+++ b/src/roles/iop_vulnerability_frontend/defaults/main.yaml
@@ -0,0 +1,5 @@
+---
+iop_vulnerability_frontend_container_image: "quay.io/iop/vulnerability-frontend"
+iop_vulnerability_frontend_container_tag: "foreman-3.16"
+iop_vulnerability_frontend_assets_path: "/var/www/iop/assets/apps/vulnerability"
+iop_vulnerability_frontend_source_path: "/srv/dist/."
diff --git a/src/roles/iop_vulnerability_frontend/tasks/main.yaml b/src/roles/iop_vulnerability_frontend/tasks/main.yaml
new file mode 100644
index 000000000..05d58858d
--- /dev/null
+++ b/src/roles/iop_vulnerability_frontend/tasks/main.yaml
@@ -0,0 +1,91 @@
+---
+- name: Pull Vulnerability Frontend container image
+ containers.podman.podman_image:
+ name: "{{ iop_vulnerability_frontend_container_image }}:{{ iop_vulnerability_frontend_container_tag }}"
+ state: present
+
+- name: Ensure parent assets directory exists
+ ansible.builtin.file:
+ path: /var/www/iop/assets/apps
+ state: directory
+ owner: root
+ group: root
+ mode: '0755'
+
+- name: Ensure assets directory exists
+ ansible.builtin.file:
+ path: "{{ iop_vulnerability_frontend_assets_path }}"
+ state: directory
+ owner: root
+ group: root
+ mode: '0755'
+
+- name: Create temporary container for asset extraction
+ containers.podman.podman_container:
+ name: iop-vulnerability-frontend-temp
+ image: "{{ iop_vulnerability_frontend_container_image }}:{{ iop_vulnerability_frontend_container_tag }}"
+ state: created
+
+- name: Extract vulnerability frontend assets from container
+ containers.podman.podman_container_copy:
+ container: iop-vulnerability-frontend-temp
+ src: "{{ iop_vulnerability_frontend_source_path }}"
+ dest: "{{ iop_vulnerability_frontend_assets_path }}"
+ from_container: true
+
+- name: Restore SELinux context for vulnerability frontend assets
+ ansible.builtin.command:
+ cmd: restorecon -R "{{ iop_vulnerability_frontend_assets_path }}"
+ when: ansible_facts['selinux']['status'] == "enabled"
+ changed_when: false
+
+- name: Remove temporary container
+ containers.podman.podman_container:
+ name: iop-vulnerability-frontend-temp
+ state: absent
+
+- name: Set ownership of vulnerability frontend assets
+ ansible.builtin.file:
+ path: "{{ iop_vulnerability_frontend_assets_path }}"
+ owner: root
+ group: root
+ recurse: true
+
+- name: Ensure Apache SSL config directory exists
+ ansible.builtin.file:
+ path: /etc/httpd/conf.d/05-foreman-ssl.d
+ state: directory
+ mode: '0755'
+
+- name: Configure Apache for vulnerability frontend assets
+ ansible.builtin.copy:
+ dest: /etc/httpd/conf.d/05-foreman-ssl.d/vulnerability-frontend.conf
+ content: |
+      # IOP Vulnerability Frontend Assets Configuration
+      Alias /assets/apps/vulnerability {{ iop_vulnerability_frontend_assets_path }}
+      ProxyPass /assets/apps/vulnerability !
+
+      <Directory "{{ iop_vulnerability_frontend_assets_path }}">
+        Options SymLinksIfOwnerMatch
+        AllowOverride None
+        Require all granted
+
+        # Use standard http expire header for assets instead of ETag
+        <IfModule mod_expires.c>
+          Header unset ETag
+          FileETag None
+          ExpiresActive On
+          ExpiresDefault "access plus 1 year"
+        </IfModule>
+
+        # Return compressed assets if they are precompiled
+        RewriteEngine On
+        # Make sure the browser supports gzip encoding and file with .gz added
+        # does exist on disc before we rewrite with the extension
+        RewriteCond %{HTTP:Accept-Encoding} \b(x-)?gzip\b
+        RewriteCond %{REQUEST_FILENAME} \.(css|js|svg)$
+        RewriteCond %{REQUEST_FILENAME}.gz -s
+        RewriteRule ^(.+) $1.gz [L]
+      </Directory>
+ mode: '0644'
+ notify: "httpd : Restart httpd"
diff --git a/src/roles/iop_yuptoo/defaults/main.yaml b/src/roles/iop_yuptoo/defaults/main.yaml
new file mode 100644
index 000000000..5bf9c3860
--- /dev/null
+++ b/src/roles/iop_yuptoo/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+iop_yuptoo_container_image: "quay.io/iop/yuptoo"
+iop_yuptoo_container_tag: "foreman-3.16"
diff --git a/src/roles/iop_yuptoo/handlers/main.yaml b/src/roles/iop_yuptoo/handlers/main.yaml
new file mode 100644
index 000000000..99a098e2f
--- /dev/null
+++ b/src/roles/iop_yuptoo/handlers/main.yaml
@@ -0,0 +1,6 @@
+---
+- name: Restart yuptoo
+ ansible.builtin.systemd:
+ name: iop-core-yuptoo
+ state: restarted
+ when: ansible_facts.services['iop-core-yuptoo.service'] is defined
diff --git a/src/roles/iop_yuptoo/tasks/main.yaml b/src/roles/iop_yuptoo/tasks/main.yaml
new file mode 100644
index 000000000..5cdd9b6b0
--- /dev/null
+++ b/src/roles/iop_yuptoo/tasks/main.yaml
@@ -0,0 +1,36 @@
+---
+- name: Pull Yuptoo container image
+ containers.podman.podman_image:
+ name: "{{ iop_yuptoo_container_image }}:{{ iop_yuptoo_container_tag }}"
+ state: present
+
+- name: Deploy Yuptoo container
+ containers.podman.podman_container:
+ name: iop-core-yuptoo
+ image: "{{ iop_yuptoo_container_image }}:{{ iop_yuptoo_container_tag }}"
+ state: quadlet
+ command: python -m main
+ env:
+ BOOTSTRAP_SERVERS: "iop-core-kafka:9092"
+ BYPASS_PAYLOAD_EXPIRATION: "true"
+ network:
+ - iop-core-network
+ quadlet_options:
+ - |
+ [Unit]
+ Description=IOP Core Yuptoo Container
+ [Service]
+ Environment=REGISTRY_AUTH_FILE=/etc/foreman/registry-auth.json
+ Restart=on-failure
+ [Install]
+ WantedBy=default.target
+
+- name: Run daemon reload to make Quadlet create the service files
+ ansible.builtin.systemd:
+ daemon_reload: true
+
+- name: Start Yuptoo service
+ ansible.builtin.systemd:
+ name: iop-core-yuptoo
+ enabled: true
+ state: started
diff --git a/src/vars/base.yaml b/src/vars/base.yaml
index 16765f9a7..a522f444d 100644
--- a/src/vars/base.yaml
+++ b/src/vars/base.yaml
@@ -34,3 +34,7 @@ hammer_ca_certificate: "{{ server_ca_certificate }}"
hammer_plugins: "{{ enabled_features | features_to_hammer_plugins }}"
foreman_proxy_plugins: "{{ enabled_features | features_to_foreman_proxy_plugins }}"
+
+iop_core_foreman_url: "{{ foreman_url }}"
+iop_core_foreman_admin_username: "{{ foreman_initial_admin_username }}"
+iop_core_foreman_admin_password: "{{ foreman_initial_admin_password }}"
diff --git a/src/vars/database.yml b/src/vars/database.yml
index 3f4a73cd1..2a89bed01 100644
--- a/src/vars/database.yml
+++ b/src/vars/database.yml
@@ -37,9 +37,9 @@ postgresql_databases:
- name: "{{ pulp_database_name }}"
owner: "{{ pulp_database_user }}"
postgresql_users:
- - name: "{{ candlepin_database_name }}"
+ - name: "{{ candlepin_database_user }}"
password: "{{ candlepin_database_password }}"
- - name: "{{ foreman_database_name }}"
+ - name: "{{ foreman_database_user }}"
password: "{{ foreman_database_password }}"
- - name: "{{ pulp_database_name }}"
+ - name: "{{ pulp_database_user }}"
password: "{{ pulp_database_password }}"
diff --git a/src/vars/database_iop.yml b/src/vars/database_iop.yml
new file mode 100644
index 000000000..792333b20
--- /dev/null
+++ b/src/vars/database_iop.yml
@@ -0,0 +1,56 @@
+---
+iop_database_host: host.containers.internal
+iop_database_port: 5432
+
+iop_inventory_database_host: "{{ iop_database_host }}"
+iop_inventory_database_port: "{{ iop_database_port }}"
+iop_inventory_database_name: inventory_db
+iop_inventory_database_user: inventory_admin
+iop_inventory_database_password: CHANGEME
+
+iop_advisor_database_host: "{{ iop_database_host }}"
+iop_advisor_database_port: "{{ iop_database_port }}"
+iop_advisor_database_name: advisor_db
+iop_advisor_database_user: advisor_user
+iop_advisor_database_password: CHANGEME
+
+iop_remediation_database_host: "{{ iop_database_host }}"
+iop_remediation_database_port: "{{ iop_database_port }}"
+iop_remediation_database_name: remediations_db
+iop_remediation_database_user: remediations_user
+iop_remediation_database_password: CHANGEME
+
+iop_vmaas_database_host: "{{ iop_database_host }}"
+iop_vmaas_database_port: "{{ iop_database_port }}"
+iop_vmaas_database_name: vmaas_db
+iop_vmaas_database_user: vmaas_admin
+iop_vmaas_database_password: CHANGEME
+
+iop_vulnerability_database_host: "{{ iop_database_host }}"
+iop_vulnerability_database_port: "{{ iop_database_port }}"
+iop_vulnerability_database_name: vulnerability_db
+iop_vulnerability_database_user: vulnerability_admin
+iop_vulnerability_database_password: CHANGEME
+
+iop_postgresql_databases:
+ - name: "{{ iop_inventory_database_name }}"
+ owner: "{{ iop_inventory_database_user }}"
+ - name: "{{ iop_advisor_database_name }}"
+ owner: "{{ iop_advisor_database_user }}"
+ - name: "{{ iop_remediation_database_name }}"
+ owner: "{{ iop_remediation_database_user }}"
+ - name: "{{ iop_vmaas_database_name }}"
+ owner: "{{ iop_vmaas_database_user }}"
+ - name: "{{ iop_vulnerability_database_name }}"
+ owner: "{{ iop_vulnerability_database_user }}"
+iop_postgresql_users:
+ - name: "{{ iop_inventory_database_user }}"
+ password: "{{ iop_inventory_database_password }}"
+ - name: "{{ iop_advisor_database_user }}"
+ password: "{{ iop_advisor_database_password }}"
+ - name: "{{ iop_remediation_database_user }}"
+ password: "{{ iop_remediation_database_password }}"
+ - name: "{{ iop_vmaas_database_user }}"
+ password: "{{ iop_vmaas_database_password }}"
+ - name: "{{ iop_vulnerability_database_user }}"
+ password: "{{ iop_vulnerability_database_password }}"
diff --git a/src/vars/default_certificates.yml b/src/vars/default_certificates.yml
index 09f47c5c9..04a3f8142 100644
--- a/src/vars/default_certificates.yml
+++ b/src/vars/default_certificates.yml
@@ -11,3 +11,13 @@ client_key: "{{ certificates_ca_directory }}/private/{{ ansible_facts['fqdn'] }}
client_ca_certificate: "{{ certificates_ca_directory }}/certs/ca.crt"
localhost_key: "{{ certificates_ca_directory }}/private/localhost.key"
localhost_certificate: "{{ certificates_ca_directory }}/certs/localhost.crt"
+localhost_client_key: "{{ certificates_ca_directory }}/private/localhost-client.key"
+localhost_client_certificate: "{{ certificates_ca_directory }}/certs/localhost-client.crt"
+
+# IOP Gateway certificate paths - uses localhost certs to match puppet-iop behavior
+iop_gateway_server_certificate: "{{ certificates_ca_directory }}/certs/localhost.crt"
+iop_gateway_server_key: "{{ certificates_ca_directory }}/private/localhost.key"
+iop_gateway_server_ca_certificate: "{{ certificates_ca_directory }}/certs/ca.crt"
+iop_gateway_client_certificate: "{{ certificates_ca_directory }}/certs/localhost-client.crt"
+iop_gateway_client_key: "{{ certificates_ca_directory }}/private/localhost-client.key"
+iop_gateway_client_ca_certificate: "{{ certificates_ca_directory }}/certs/ca.crt"
diff --git a/src/vars/images.yml b/src/vars/images.yml
index c46f19f82..e4419e3a9 100644
--- a/src/vars/images.yml
+++ b/src/vars/images.yml
@@ -6,7 +6,7 @@ postgresql_registry_auth_file: "{{ registry_auth_file }}"
pulp_registry_auth_file: "{{ registry_auth_file }}"
redis_registry_auth_file: "{{ registry_auth_file }}"
-container_tag_stream: "3.18"
+container_tag_stream: "nightly"
candlepin_container_image: quay.io/foreman/candlepin
candlepin_container_tag: "foreman-{{ container_tag_stream }}"
foreman_container_image: quay.io/foreman/foreman
diff --git a/src/vars/installer_certificates.yml b/src/vars/installer_certificates.yml
index c6ab83af3..179dcdfdc 100644
--- a/src/vars/installer_certificates.yml
+++ b/src/vars/installer_certificates.yml
@@ -10,3 +10,11 @@ client_key: "/root/ssl-build/{{ ansible_facts['fqdn'] }}/{{ ansible_facts['fqdn'
client_ca_certificate: "{{ ca_certificate }}"
localhost_key: "/root/ssl-build/localhost/localhost-tomcat.key"
localhost_certificate: "/root/ssl-build/localhost/localhost-tomcat.crt"
+
+# IOP Gateway certificate paths - uses localhost certs to match puppet-iop behavior
+iop_gateway_server_certificate: "/root/ssl-build/localhost/localhost-iop-core-gateway-server.crt"
+iop_gateway_server_key: "/root/ssl-build/localhost/localhost-iop-core-gateway-server.key"
+iop_gateway_server_ca_certificate: "/root/ssl-build/katello-default-ca.crt"
+iop_gateway_client_certificate: "/root/ssl-build/localhost/localhost-iop-core-gateway-client.crt"
+iop_gateway_client_key: "/root/ssl-build/localhost/localhost-iop-core-gateway-client.key"
+iop_gateway_client_ca_certificate: "/root/ssl-build/katello-server-ca.crt"
diff --git a/tests/conftest.py b/tests/conftest.py
index d9ed914cc..1fae806c6 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -6,6 +6,7 @@
import pytest
import testinfra
import yaml
+import os
from jinja2 import Environment, FileSystemLoader, select_autoescape
@@ -171,3 +172,33 @@ def wait_for_tasks(foremanapi, search=None):
def wait_for_metadata_generate(foremanapi):
wait_for_tasks(foremanapi, 'label = Actions::Katello::Repository::MetadataGenerate')
+
+
+def is_iop_enabled():
+ test_dir = os.path.dirname(os.path.abspath(__file__))
+ foremanctl_dir = os.path.dirname(test_dir)
+ params_file = os.path.join(foremanctl_dir, '.var', 'lib', 'foremanctl', 'parameters.yaml')
+
+ if os.path.exists(params_file):
+ with open(params_file, 'r') as f:
+ params = yaml.safe_load(f)
+ features = params.get('features', [])
+ if isinstance(features, str):
+ features = features.split()
+ return 'iop' in features
+
+ return False
+
+
+def pytest_configure(config):
+ config.addinivalue_line("markers", "iop: tests requiring IOP to be enabled")
+
+
+def pytest_collection_modifyitems(config, items):
+ if is_iop_enabled():
+ return
+
+ skip_iop = pytest.mark.skip(reason="IOP not enabled - skipping IOP tests ('iop' not in enabled_features)")
+ for item in items:
+ if "iop" in item.keywords:
+ item.add_marker(skip_iop)
diff --git a/tests/fixtures/help/checks.txt b/tests/fixtures/help/checks.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/iop/__init__.py b/tests/iop/__init__.py
new file mode 100644
index 000000000..d558ba0af
--- /dev/null
+++ b/tests/iop/__init__.py
@@ -0,0 +1 @@
+# IOP Test Package
diff --git a/tests/iop/test_advisor.py b/tests/iop/test_advisor.py
new file mode 100644
index 000000000..970598fd4
--- /dev/null
+++ b/tests/iop/test_advisor.py
@@ -0,0 +1,142 @@
+import pytest
+
+pytestmark = pytest.mark.iop
+
+
+def test_advisor_backend_api_service(server):
+ service = server.service("iop-service-advisor-backend-api")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_advisor_backend_service(server):
+ service = server.service("iop-service-advisor-backend-service")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_advisor_api_container(server):
+ result = server.run("podman ps --format '{{.Names}}' | grep iop-service-advisor-backend-api")
+ assert result.succeeded
+ assert "iop-service-advisor-backend-api" in result.stdout
+
+
+def test_advisor_service_container(server):
+ result = server.run("podman ps --format '{{.Names}}' | grep iop-service-advisor-backend-service")
+ assert result.succeeded
+ assert "iop-service-advisor-backend-service" in result.stdout
+
+
+def test_advisor_api_quadlet_file(server):
+ quadlet_file = server.file("/etc/containers/systemd/iop-service-advisor-backend-api.container")
+ assert quadlet_file.exists
+ assert quadlet_file.is_file
+
+
+def test_advisor_service_quadlet_file(server):
+ quadlet_file = server.file("/etc/containers/systemd/iop-service-advisor-backend-service.container")
+ assert quadlet_file.exists
+ assert quadlet_file.is_file
+
+
+def test_advisor_api_service_dependencies(server):
+ result = server.run("systemctl show iop-service-advisor-backend-api --property=After")
+ assert result.succeeded
+ assert "iop-core-kafka.service" in result.stdout
+
+
+def test_advisor_service_dependencies(server):
+ result = server.run("systemctl show iop-service-advisor-backend-service --property=After")
+ assert result.succeeded
+ assert "iop-core-kafka.service" in result.stdout
+
+
+def test_advisor_database_secrets(server):
+ result = server.run("podman secret ls --format '{{.Name}}'")
+ assert result.succeeded
+ assert "iop-service-advisor-backend-database-username" in result.stdout
+ assert "iop-service-advisor-backend-database-password" in result.stdout
+ assert "iop-service-advisor-backend-database-name" in result.stdout
+ assert "iop-service-advisor-backend-database-host" in result.stdout
+ assert "iop-service-advisor-backend-database-port" in result.stdout
+
+
+def test_advisor_api_kafka_connectivity(server):
+ result = server.run("podman logs iop-service-advisor-backend-api 2>&1 | grep -i 'kafka\\|bootstrap'")
+ assert result.succeeded
+
+
+def test_advisor_service_kafka_connectivity(server):
+ result = server.run("podman logs iop-service-advisor-backend-service 2>&1 | grep -i 'kafka\\|bootstrap'")
+ assert result.succeeded
+
+
+def test_advisor_api_port_configured(server):
+ result = server.run("podman inspect iop-service-advisor-backend-api --format '{{.Config.Env}}'")
+ assert result.succeeded
+ assert "PORT=8000" in result.stdout
+
+
+def test_advisor_fdw_foreign_server_exists(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"SELECT * FROM pg_foreign_server WHERE srvname = 'hbi_server';\"")
+ assert result.succeeded
+ assert "hbi_server" in result.stdout
+
+
+def test_advisor_fdw_user_mapping_exists(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"SELECT * FROM information_schema.user_mappings WHERE foreign_server_name = 'hbi_server';\"")
+ assert result.succeeded
+ assert "advisor_user" in result.stdout
+
+
+def test_advisor_fdw_foreign_table_exists(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"\\det inventory_source.*\"")
+ assert result.succeeded
+ assert "hosts" in result.stdout
+
+
+def test_advisor_fdw_inventory_view_exists(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"\\dv inventory.*\"")
+ assert result.succeeded
+ assert "hosts" in result.stdout
+
+
+def test_advisor_fdw_inventory_view_queryable(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"SELECT 1 FROM inventory.hosts LIMIT 1;\"")
+ assert result.rc == 0
+
+
+# Additional comprehensive FDW tests (beyond puppet-iop baseline)
+def test_advisor_fdw_postgres_fdw_extension(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"SELECT extname FROM pg_extension WHERE extname = 'postgres_fdw';\"")
+ assert result.succeeded
+ assert "postgres_fdw" in result.stdout
+
+
+def test_advisor_fdw_postgres_user_mapping_exists(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"SELECT usename FROM pg_user_mappings WHERE srvname = 'hbi_server' AND usename = 'postgres';\"")
+ assert result.succeeded
+ assert "postgres" in result.stdout
+
+
+def test_advisor_fdw_inventory_source_schema_exists(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'inventory_source';\"")
+ assert result.succeeded
+ assert "inventory_source" in result.stdout
+
+
+def test_advisor_fdw_inventory_schema_exists(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'inventory';\"")
+ assert result.succeeded
+ assert "inventory" in result.stdout
+
+
+def test_advisor_fdw_permissions_on_view(server):
+ result = server.run("podman exec postgresql psql advisor_db -c \"SELECT privilege_type FROM information_schema.table_privileges WHERE grantee = 'advisor_user' AND table_schema = 'inventory' AND table_name = 'hosts';\"")
+ assert result.succeeded
+ assert "SELECT" in result.stdout
+
+
+def test_advisor_api_endpoint(server):
+ result = server.run("podman run --network=iop-core-network --rm quay.io/iop/advisor-backend:latest curl -s -o /dev/null -w '%{http_code}' http://iop-service-advisor-backend-api:8000/ 2>/dev/null || echo '000'")
+ assert result.stdout.strip() != "000"
diff --git a/tests/iop/test_advisor_frontend.py b/tests/iop/test_advisor_frontend.py
new file mode 100644
index 000000000..a111b61f0
--- /dev/null
+++ b/tests/iop/test_advisor_frontend.py
@@ -0,0 +1,28 @@
+import pytest
+
+pytestmark = pytest.mark.iop
+
+
+def test_advisor_frontend_assets_directory(server):
+ assets_dir = server.file("/var/www/iop/assets/apps/advisor")
+ assert assets_dir.exists
+ assert assets_dir.is_directory
+ assert assets_dir.mode == 0o755
+
+
+def test_advisor_frontend_app_info_file(server):
+ app_info_file = server.file("/var/www/iop/assets/apps/advisor/app.info.json")
+
+ assert app_info_file.exists
+ assert app_info_file.is_file
+
+
+def test_advisor_frontend_javascript_assets_accessible(server):
+ result = server.run("find /var/www/iop/assets/apps/advisor -name '*.js' | head -1")
+ assert result.succeeded
+ assert result.stdout.strip()
+ js_file = result.stdout.strip().replace("/var/www/iop", "")
+ curl_result = server.run(f"curl -s -o /dev/null -w '%{{http_code}}' -k https://localhost{js_file}")
+ assert curl_result.succeeded
+ http_code = curl_result.stdout.strip()
+ assert http_code in ["200"]
diff --git a/tests/iop/test_engine.py b/tests/iop/test_engine.py
new file mode 100644
index 000000000..f6adf0983
--- /dev/null
+++ b/tests/iop/test_engine.py
@@ -0,0 +1,37 @@
+import pytest
+
+pytestmark = pytest.mark.iop
+
+
+def test_engine_service(server):
+ service = server.service("iop-core-engine")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_engine_secret(server):
+ result = server.run("podman secret ls --format '{{.Name}}'")
+ assert result.succeeded
+ assert "iop-core-engine-config-yml" in result.stdout
+
+
+def test_engine_config_content(server):
+ result = server.run("podman secret inspect iop-core-engine-config-yml --showsecret")
+ assert result.succeeded
+
+ config_data = result.stdout.strip()
+ assert "insights.specs.default" in config_data
+ assert "insights_kafka_service.rules" in config_data
+ assert "iop-core-kafka:9092" in config_data
+
+
+def test_engine_service_dependencies(server):
+ result = server.run("systemctl show iop-core-engine --property=After")
+ assert result.succeeded
+ assert "iop-core-ingress.service" in result.stdout
+ assert "iop-core-kafka.service" in result.stdout
+
+
+def test_engine_kafka_connectivity(server):
+ result = server.run("podman logs iop-core-engine 2>&1 | grep -i 'kafka\\|bootstrap'")
+    assert result.succeeded
diff --git a/tests/iop/test_gateway.py b/tests/iop/test_gateway.py
new file mode 100644
index 000000000..3bdb73535
--- /dev/null
+++ b/tests/iop/test_gateway.py
@@ -0,0 +1,32 @@
+import pytest
+
+pytestmark = pytest.mark.iop
+
+
+def test_gateway_service(server):
+ service = server.service("iop-core-gateway")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_gateway_port(server):
+ addr = server.addr("localhost")
+ assert addr.port("24443").is_reachable
+
+
+def test_gateway_secrets(server):
+ secrets = [
+ 'iop-core-gateway-server-cert',
+ 'iop-core-gateway-server-key',
+ 'iop-core-gateway-server-ca-cert',
+ 'iop-core-gateway-client-cert',
+ 'iop-core-gateway-client-key',
+ 'iop-core-gateway-client-ca-cert',
+ 'iop-core-gateway-relay-conf'
+ ]
+
+ result = server.run("podman secret ls --format '{{.Name}}'")
+ assert result.succeeded
+
+ for secret_name in secrets:
+        assert secret_name in result.stdout
diff --git a/tests/iop/test_ingress.py b/tests/iop/test_ingress.py
new file mode 100644
index 000000000..cb61cb46e
--- /dev/null
+++ b/tests/iop/test_ingress.py
@@ -0,0 +1,15 @@
+import pytest
+
+pytestmark = pytest.mark.iop
+
+
+def test_ingress_service(server):
+ service = server.service("iop-core-ingress")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_ingress_http_endpoint(server):
+ result = server.run("podman run --rm quay.io/iop/ingress:latest curl -s -o /dev/null -w '%{http_code}' http://iop-core-ingress:8080/")
+ if result.succeeded:
+ assert "200" in result.stdout
\ No newline at end of file
diff --git a/tests/iop/test_integration.py b/tests/iop/test_integration.py
new file mode 100644
index 000000000..8a8dacac2
--- /dev/null
+++ b/tests/iop/test_integration.py
@@ -0,0 +1,131 @@
+import pytest
+
+pytestmark = pytest.mark.iop
+
+
+def test_iop_core_kafka_service(server):
+ service = server.service("iop-core-kafka")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_core_ingress_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-core-ingress").succeeded
+ if service_exists:
+ service = server.service("iop-core-ingress")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_ingress_endpoint(server):
+ result = server.run("curl -f http://localhost:8080/ 2>/dev/null || echo 'Ingress not yet responding'")
+ assert result.rc == 0
+
+
+def test_iop_core_puptoo_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-core-puptoo").succeeded
+ if service_exists:
+ service = server.service("iop-core-puptoo")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_puptoo_metrics_endpoint(server):
+ result = server.run("curl -f http://localhost:8000/metrics 2>/dev/null || echo 'Puptoo not yet responding'")
+ assert result.rc == 0
+
+
+def test_iop_core_yuptoo_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-core-yuptoo").succeeded
+ if service_exists:
+ service = server.service("iop-core-yuptoo")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_yuptoo_endpoint(server):
+ result = server.run("curl -f http://localhost:5005/ 2>/dev/null || echo 'Yuptoo not yet responding'")
+ assert result.rc == 0
+
+
+def test_iop_core_engine_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-core-engine").succeeded
+ if service_exists:
+ service = server.service("iop-core-engine")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_core_gateway_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-core-gateway").succeeded
+ if service_exists:
+ service = server.service("iop-core-gateway")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_gateway_endpoint(server):
+ result = server.run("curl -f http://localhost:24443/ 2>/dev/null || echo 'Gateway not yet responding'")
+ assert result.rc == 0
+
+
+def test_iop_gateway_api_ingress_endpoint(server):
+ result = server.run("curl -f http://localhost:24443/api/ingress 2>/dev/null || echo 'Gateway API ingress not yet responding'")
+ assert result.rc == 0
+
+
+def test_iop_gateway_https_cert_auth(server, certificates):
+ result = server.run(f"curl -s -o /dev/null -w '%{{http_code}}' https://localhost:24443/ --cert {certificates['iop_gateway_client_certificate']} --key {certificates['iop_gateway_client_key']} --cacert {certificates['iop_gateway_client_ca_certificate']} 2>/dev/null || echo '000'")
+ assert "200" in result.stdout
+
+
+def test_iop_core_host_inventory_api_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-core-host-inventory-api").succeeded
+ if service_exists:
+ service = server.service("iop-core-host-inventory-api")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_inventory_mq_endpoint(server):
+ result = server.run("podman run --network=iop-core-network quay.io/iop/host-inventory:latest curl http://iop-core-host-inventory:9126/ 2>/dev/null || echo 'Host inventory MQ not yet responding'")
+ assert result.rc == 0
+
+
+def test_iop_inventory_api_health_endpoint(server):
+ result = server.run("podman run --network=iop-core-network quay.io/iop/host-inventory curl -s -o /dev/null -w '%{http_code}' http://iop-core-host-inventory-api:8081/health 2>/dev/null || echo '000'")
+ assert "200" in result.stdout
+
+
+def test_iop_service_advisor_backend_api_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-service-advisor-backend-api").succeeded
+ if service_exists:
+ service = server.service("iop-service-advisor-backend-api")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_service_advisor_backend_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-service-advisor-backend-service").succeeded
+ if service_exists:
+ service = server.service("iop-service-advisor-backend-service")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_advisor_api_endpoint(server):
+ result = server.run("podman run --network=iop-core-network --rm quay.io/iop/advisor-backend:latest curl -f http://iop-service-advisor-backend-api:8000/ 2>/dev/null || echo 'Advisor API not yet responding'")
+ assert result.rc == 0
+
+
+def test_iop_service_remediations_api_service(server):
+ service_exists = server.run("systemctl list-units --type=service | grep iop-service-remediations-api").succeeded
+ if service_exists:
+ service = server.service("iop-service-remediations-api")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_iop_remediations_api_endpoint(server):
+ result = server.run("curl -f http://localhost:9002/ 2>/dev/null || echo 'Remediations API not yet responding'")
+ assert result.rc == 0
diff --git a/tests/iop/test_inventory.py b/tests/iop/test_inventory.py
new file mode 100644
index 000000000..573316940
--- /dev/null
+++ b/tests/iop/test_inventory.py
@@ -0,0 +1,68 @@
+import pytest
+
+pytestmark = pytest.mark.iop
+
+
+def test_inventory_migrate_service(server):
+ service = server.service("iop-core-host-inventory-migrate")
+ assert service.is_enabled
+
+
+def test_inventory_mq_service(server):
+ service = server.service("iop-core-host-inventory")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_inventory_api_service(server):
+ service = server.service("iop-core-host-inventory-api")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_inventory_service_dependencies(server):
+ result = server.run("systemctl show iop-core-host-inventory --property=After")
+ assert result.succeeded
+ assert "iop-core-host-inventory-migrate.service" in result.stdout
+
+
+def test_inventory_api_endpoint(server):
+ result = server.run("podman run --rm quay.io/iop/host-inventory:latest curl -s -o /dev/null -w '%{http_code}' http://iop-core-host-inventory-api:8081/health")
+ if result.succeeded:
+ assert "200" in result.stdout
+
+
+def test_inventory_hosts_endpoint(server):
+ result = server.run("podman run --rm quay.io/iop/host-inventory:latest curl -s -o /dev/null -w '%{http_code}' http://iop-core-host-inventory-api:8081/api/inventory/v1/hosts")
+ if result.succeeded:
+ assert "200" in result.stdout
+
+
+def test_inventory_cleanup_service(server):
+ service = server.service("iop-core-host-inventory-cleanup")
+ assert not service.is_running
+
+
+def test_inventory_cleanup_service_enabled(server):
+ result = server.run("systemctl is-enabled iop-core-host-inventory-cleanup")
+ assert result.succeeded
+ assert "generated" in result.stdout
+
+
+def test_inventory_cleanup_timer(server):
+ service = server.service("iop-core-host-inventory-cleanup.timer")
+ assert service.is_enabled
+ assert service.is_running
+
+
+def test_inventory_cleanup_timer_config(server):
+ timer_file = server.file("/etc/systemd/system/iop-core-host-inventory-cleanup.timer")
+ assert timer_file.exists
+ assert timer_file.is_file
+
+ content = timer_file.content_string
+ assert "OnBootSec=10min" in content
+ assert "OnUnitActiveSec=24h" in content
+ assert "Persistent=true" in content
+ assert "RandomizedDelaySec=300" in content
+ assert "WantedBy=timers.target" in content
\ No newline at end of file
diff --git a/tests/iop/test_kafka.py b/tests/iop/test_kafka.py
new file mode 100644
index 000000000..18a859e90
--- /dev/null
+++ b/tests/iop/test_kafka.py
@@ -0,0 +1,71 @@
+import pytest
+
+pytestmark = pytest.mark.iop
+
+
+def test_kafka_service(server):
+ service = server.service("iop-core-kafka")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_kafka_volume(server):
+ result = server.run("podman volume ls --format '{{.Name}}'")
+ assert result.succeeded
+ assert "iop-core-kafka-data" in result.stdout
+
+
+def test_kafka_topics_initialized(server):
+ result = server.run("podman exec iop-core-kafka /opt/kafka/init.sh --check")
+ assert result.succeeded
+
+
+def test_kafka_secrets(server):
+ secrets = [
+ 'iop-core-kafka-init-start',
+ 'iop-core-kafka-server-properties',
+ 'iop-core-kafka-init'
+ ]
+
+ result = server.run("podman secret ls --format '{{.Name}}'")
+ assert result.succeeded
+
+ for secret_name in secrets:
+ assert secret_name in result.stdout
+
+
+def test_kafka_config_content(server):
+ result = server.run("podman secret inspect iop-core-kafka-server-properties --showsecret")
+ assert result.succeeded
+
+ config_data = result.stdout.strip()
+ assert "advertised.listeners=PLAINTEXT://iop-core-kafka:9092" in config_data
+ assert "controller.quorum.voters=1@iop-core-kafka:9093" in config_data
+
+
+def test_kafka_topic_creation(server):
+ topics = [
+ "platform.engine.results",
+ "platform.insights.rule-hits",
+ "platform.insights.rule-deactivation",
+ "platform.inventory.events",
+ "platform.inventory.host-ingress",
+ "platform.sources.event-stream",
+ "platform.playbook-dispatcher.runs",
+ "platform.upload.announce",
+ "platform.upload.validation",
+ "platform.logging.logs",
+ "platform.payload-status",
+ "platform.remediation-updates.vulnerability",
+ "vulnerability.evaluator.results",
+ "vulnerability.evaluator.recalc",
+ "vulnerability.evaluator.upload",
+ "vulnerability.grouper.inventory.upload",
+ "vulnerability.grouper.advisor.upload"
+ ]
+
+ result = server.run("podman exec iop-core-kafka /opt/kafka/bin/kafka-topics.sh --bootstrap-server iop-core-kafka:9092 --list")
+ assert result.succeeded
+
+ for topic in topics:
+ assert topic in result.stdout
diff --git a/tests/iop/test_puptoo.py b/tests/iop/test_puptoo.py
new file mode 100644
index 000000000..c2c3f1a1e
--- /dev/null
+++ b/tests/iop/test_puptoo.py
@@ -0,0 +1,9 @@
+import pytest
+
+pytestmark = pytest.mark.iop
+
+
+def test_puptoo_service(server):
+ service = server.service("iop-core-puptoo")
+ assert service.is_running
+    assert service.is_enabled
diff --git a/tests/iop/test_remediation.py b/tests/iop/test_remediation.py
new file mode 100644
index 000000000..f8b609618
--- /dev/null
+++ b/tests/iop/test_remediation.py
@@ -0,0 +1,29 @@
+import pytest
+
+pytestmark = pytest.mark.iop
+
+
+def test_remediation_api_service(server):
+ service = server.service("iop-service-remediations-api")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_remediation_api_service_dependencies(server):
+ result = server.run("systemctl show iop-service-remediations-api --property=After")
+ assert result.succeeded
+ assert "iop-core-host-inventory-api.service" in result.stdout
+ assert "iop-service-advisor-backend-api.service" in result.stdout
+
+
+def test_remediation_api_environment_variables(server):
+ result = server.run("podman inspect iop-service-remediations-api --format '{{.Config.Env}}'")
+ assert result.succeeded
+ assert "REDIS_ENABLED=false" in result.stdout
+ assert "RBAC_ENFORCE=false" in result.stdout
+ assert "DB_SSL_ENABLED=false" in result.stdout
+
+
+def test_remediation_api_endpoint(server):
+ result = server.run("curl -s -o /dev/null -w '%{http_code}' http://localhost:9002/ 2>/dev/null || echo '000'")
+ assert result.stdout.strip() != "000"
diff --git a/tests/iop/test_vmaas.py b/tests/iop/test_vmaas.py
new file mode 100644
index 000000000..c7078574d
--- /dev/null
+++ b/tests/iop/test_vmaas.py
@@ -0,0 +1,43 @@
+import pytest
+
+pytestmark = pytest.mark.iop
+
+
+def test_vmaas_reposcan_service(server):
+ service = server.service("iop-service-vmaas-reposcan")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_vmaas_webapp_go_service(server):
+ service = server.service("iop-service-vmaas-webapp-go")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_vmaas_webapp_go_service_dependencies(server):
+ result = server.run("systemctl show iop-service-vmaas-webapp-go --property=After")
+ assert result.succeeded
+ assert "iop-service-vmaas-reposcan.service" in result.stdout
+
+
+def test_vmaas_webapp_go_service_wants(server):
+ result = server.run("systemctl show iop-service-vmaas-webapp-go --property=Wants")
+ assert result.succeeded
+ assert "iop-service-vmaas-reposcan.service" in result.stdout
+
+
+def test_vmaas_database_secrets(server):
+ result = server.run("podman secret ls --format '{{.Name}}'")
+ assert result.succeeded
+ assert "iop-service-vmaas-reposcan-database-username" in result.stdout
+ assert "iop-service-vmaas-reposcan-database-password" in result.stdout
+ assert "iop-service-vmaas-reposcan-database-name" in result.stdout
+ assert "iop-service-vmaas-reposcan-database-host" in result.stdout
+ assert "iop-service-vmaas-reposcan-database-port" in result.stdout
+
+
+def test_vmaas_data_volume(server):
+ result = server.run("podman volume ls --format '{{.Name}}' | grep iop-service-vmaas-data")
+ assert result.succeeded
+ assert "iop-service-vmaas-data" in result.stdout
diff --git a/tests/iop/test_vulnerability.py b/tests/iop/test_vulnerability.py
new file mode 100644
index 000000000..6e8b7d432
--- /dev/null
+++ b/tests/iop/test_vulnerability.py
@@ -0,0 +1,191 @@
+import pytest
+
+pytestmark = pytest.mark.iop
+
+
+def test_vulnerability_manager_service(server):
+ service = server.service("iop-service-vuln-manager")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_vulnerability_dbupgrade_service(server):
+ service = server.service("iop-service-vuln-dbupgrade")
+ assert service.is_enabled
+
+
+def test_vulnerability_taskomatic_service(server):
+ service = server.service("iop-service-vuln-taskomatic")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_vulnerability_grouper_service(server):
+ service = server.service("iop-service-vuln-grouper")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_vulnerability_listener_service(server):
+ service = server.service("iop-service-vuln-listener")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_vulnerability_evaluator_recalc_service(server):
+ service = server.service("iop-service-vuln-evaluator-recalc")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_vulnerability_evaluator_upload_service(server):
+ service = server.service("iop-service-vuln-evaluator-upload")
+ assert service.is_running
+ assert service.is_enabled
+
+
+def test_vulnerability_vmaas_sync_timer(server):
+ timer = server.service("iop-service-vuln-vmaas-sync.timer")
+ assert timer.is_enabled
+
+
+def test_vulnerability_quadlet_files(server):
+ containers = [
+ "iop-service-vuln-dbupgrade",
+ "iop-service-vuln-manager",
+ "iop-service-vuln-taskomatic",
+ "iop-service-vuln-grouper",
+ "iop-service-vuln-listener",
+ "iop-service-vuln-evaluator-recalc",
+ "iop-service-vuln-evaluator-upload",
+ "iop-service-vuln-vmaas-sync",
+ ]
+ for container in containers:
+ quadlet_file = server.file(f"/etc/containers/systemd/{container}.container")
+ assert quadlet_file.exists
+ assert quadlet_file.is_file
+
+
+def test_vulnerability_database_secrets(server):
+ result = server.run("podman secret ls --format '{{.Name}}'")
+ assert result.succeeded
+ assert "iop-service-vulnerability-database-username" in result.stdout
+ assert "iop-service-vulnerability-database-password" in result.stdout
+ assert "iop-service-vulnerability-database-name" in result.stdout
+ assert "iop-service-vulnerability-database-host" in result.stdout
+ assert "iop-service-vulnerability-database-port" in result.stdout
+
+
+def test_vulnerability_containers_networking(server):
+ containers = [
+ "iop-service-vuln-manager",
+ "iop-service-vuln-taskomatic",
+ "iop-service-vuln-grouper",
+ "iop-service-vuln-listener",
+ "iop-service-vuln-evaluator-recalc",
+ "iop-service-vuln-evaluator-upload",
+ ]
+ for container in containers:
+ result = server.run(f"podman inspect {container} --format '{{{{.NetworkSettings.Networks}}}}'")
+ assert result.succeeded
+ assert "iop-core-network" in result.stdout
+
+
+def test_vulnerability_manager_environment_variables(server):
+ result = server.run("podman inspect iop-service-vuln-manager --format '{{.Config.Env}}'")
+ assert result.succeeded
+ assert "UNLEASH_BOOTSTRAP_FILE=develfeatureflags.json" in result.stdout
+ assert "DISABLE_RBAC=TRUE" in result.stdout
+
+
+def test_vulnerability_taskomatic_environment_variables(server):
+ result = server.run("podman inspect iop-service-vuln-taskomatic --format '{{.Config.Env}}'")
+ assert result.succeeded
+ assert "IS_FEDRAMP=true" in result.stdout
+ assert "JOBS=stale_systems:5,delete_systems:30,cacheman:5" in result.stdout
+ assert "JOBS_STARTUP=cacheman" in result.stdout
+
+
+def test_vulnerability_grouper_environment_variables(server):
+ result = server.run("podman inspect iop-service-vuln-grouper --format '{{.Config.Env}}'")
+ assert result.succeeded
+ assert "KAFKA_HOST=iop-core-kafka" in result.stdout
+ assert "KAFKA_PORT=9092" in result.stdout
+ assert "KAFKA_GROUP_ID=vulnerability-grouper" in result.stdout
+ assert "PROMETHEUS_PORT=8085" in result.stdout
+
+
+def test_vulnerability_listener_environment_variables(server):
+ result = server.run("podman inspect iop-service-vuln-listener --format '{{.Config.Env}}'")
+ assert result.succeeded
+ assert "KAFKA_GROUP_ID=vulnerability-listener2" in result.stdout
+ assert "EVENTS_TOPIC=platform.inventory.events" in result.stdout
+ assert "ALLOWED_REPORTERS=puptoo,satellite" in result.stdout
+
+
+def test_vulnerability_evaluator_recalc_environment_variables(server):
+ result = server.run("podman inspect iop-service-vuln-evaluator-recalc --format '{{.Config.Env}}'")
+ assert result.succeeded
+ assert "EVALUATOR_TOPIC=vulnerability.evaluator.recalc" in result.stdout
+ assert "VMAAS_HOST=http://iop-service-vmaas-webapp-go:8000" in result.stdout
+
+
+def test_vulnerability_evaluator_upload_environment_variables(server):
+ result = server.run("podman inspect iop-service-vuln-evaluator-upload --format '{{.Config.Env}}'")
+ assert result.succeeded
+ assert "EVALUATOR_TOPIC=vulnerability.evaluator.upload" in result.stdout
+ assert "VMAAS_HOST=http://iop-service-vmaas-webapp-go:8000" in result.stdout
+
+
+def test_vulnerability_container_commands(server):
+ containers_commands = {
+ "iop-service-vuln-dbupgrade": ["bash", "-c", "/engine/dbupgrade.sh"],
+ "iop-service-vuln-manager": ["/engine/entrypoint.sh", "manager"],
+ "iop-service-vuln-taskomatic": ["/engine/entrypoint.sh", "taskomatic"],
+ "iop-service-vuln-grouper": ["/engine/entrypoint.sh", "grouper"],
+ "iop-service-vuln-listener": ["/engine/entrypoint.sh", "listener"],
+ "iop-service-vuln-evaluator-recalc": ["/engine/entrypoint.sh", "evaluator"],
+ "iop-service-vuln-evaluator-upload": ["/engine/entrypoint.sh", "evaluator"],
+ }
+ for container, expected_cmd in containers_commands.items():
+ result = server.run(f"podman inspect {container} --format '{{{{.Config.Cmd}}}}'")
+        assert result.succeeded
+        for cmd_part in expected_cmd:
+            assert cmd_part in result.stdout
+
+
+def test_vulnerability_timer_file(server):
+ timer_file = server.file("/etc/systemd/system/iop-service-vuln-vmaas-sync.timer")
+ assert timer_file.exists
+ assert timer_file.is_file
+ assert "OnCalendar=daily" in timer_file.content.decode()
+
+
+def test_vulnerability_fdw_foreign_server_exists(server):
+ result = server.run("podman exec postgresql psql vulnerability_db -c \"SELECT * FROM pg_foreign_server WHERE srvname = 'hbi_server';\"")
+ assert result.succeeded
+ assert "hbi_server" in result.stdout
+
+
+def test_vulnerability_fdw_user_mapping_exists(server):
+ result = server.run("podman exec postgresql psql vulnerability_db -c \"SELECT * FROM pg_user_mappings WHERE srvname = 'hbi_server' AND usename = 'vulnerability_admin';\"")
+ assert result.succeeded
+ assert "vulnerability_admin" in result.stdout
+
+
+def test_vulnerability_fdw_foreign_table_exists(server):
+ result = server.run("podman exec postgresql psql vulnerability_db -c \"SELECT * FROM information_schema.foreign_tables WHERE foreign_table_schema = 'inventory_source' AND foreign_table_name = 'hosts';\"")
+ assert result.succeeded
+ assert "hosts" in result.stdout
+
+
+def test_vulnerability_fdw_view_exists(server):
+ result = server.run("podman exec postgresql psql vulnerability_db -c \"SELECT * FROM information_schema.views WHERE table_schema = 'inventory' AND table_name = 'hosts';\"")
+ assert result.succeeded
+ assert "hosts" in result.stdout
+
+
+def test_vulnerability_fdw_view_access(server):
+ result = server.run("podman exec postgresql psql vulnerability_db -c \"SELECT COUNT(*) FROM inventory.hosts;\"")
+ assert "permission denied" not in result.stdout.lower()
+ assert "does not exist" not in result.stdout.lower()
diff --git a/tests/iop/test_vulnerability_frontend.py b/tests/iop/test_vulnerability_frontend.py
new file mode 100644
index 000000000..4b55af898
--- /dev/null
+++ b/tests/iop/test_vulnerability_frontend.py
@@ -0,0 +1,28 @@
+import pytest
+
+pytestmark = pytest.mark.iop
+
+
+def test_vulnerability_frontend_assets_directory(server):
+ assets_dir = server.file("/var/www/iop/assets/apps/vulnerability")
+ assert assets_dir.exists
+ assert assets_dir.is_directory
+ assert assets_dir.mode == 0o755
+
+
+def test_vulnerability_frontend_app_info_file(server):
+ app_info_file = server.file("/var/www/iop/assets/apps/vulnerability/app.info.json")
+
+ assert app_info_file.exists
+ assert app_info_file.is_file
+
+
+def test_vulnerability_frontend_javascript_assets_accessible(server):
+ result = server.run("find /var/www/iop/assets/apps/vulnerability -name '*.js' | head -1")
+ assert result.succeeded
+ assert result.stdout.strip()
+ js_file = result.stdout.strip().replace("/var/www/iop", "")
+ curl_result = server.run(f"curl -s -o /dev/null -w '%{{http_code}}' -k https://localhost{js_file}")
+ assert curl_result.succeeded
+ http_code = curl_result.stdout.strip()
+    assert http_code == "200"
diff --git a/tests/iop/test_yuptoo.py b/tests/iop/test_yuptoo.py
new file mode 100644
index 000000000..04571922b
--- /dev/null
+++ b/tests/iop/test_yuptoo.py
@@ -0,0 +1,9 @@
+import pytest
+
+pytestmark = pytest.mark.iop
+
+
+def test_yuptoo_service(server):
+ service = server.service("iop-core-yuptoo")
+ assert service.is_running
+    assert service.is_enabled