From 3f973ffae87d1da2bebf3a2f0596d8588ed4b278 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20M=C3=A9ndez=20Hern=C3=A1ndez?= Date: Sun, 8 Mar 2026 17:37:36 +0100 Subject: [PATCH 1/5] Add reusable role for Podman network management Introduces the deploy_network role, which wraps containers.podman.podman_network to create a named bridge network with configurable properties: internal (no external routing), isolate (no cross-network packet forwarding), IPv6, subnet, gateway, and DNS. Co-Authored-By: Claude Sonnet 4.6 --- src/roles/deploy_network/defaults/main.yaml | 22 +++++++++++++++++++++ src/roles/deploy_network/tasks/main.yaml | 3 +++ src/roles/deploy_network/tasks/podman.yaml | 12 +++++++++++ 3 files changed, 37 insertions(+) create mode 100644 src/roles/deploy_network/defaults/main.yaml create mode 100644 src/roles/deploy_network/tasks/main.yaml create mode 100644 src/roles/deploy_network/tasks/podman.yaml diff --git a/src/roles/deploy_network/defaults/main.yaml b/src/roles/deploy_network/defaults/main.yaml new file mode 100644 index 000000000..d0463c12a --- /dev/null +++ b/src/roles/deploy_network/defaults/main.yaml @@ -0,0 +1,22 @@ +--- +deploy_network_name: foreman-network + +# Network driver +deploy_network_driver: bridge + +# Subnet and gateway (optional - auto-assigned if omitted) +# deploy_network_subnet: "10.89.0.0/24" +# deploy_network_gateway: "10.89.0.1" + +# Prevent containers on this network from reaching external hosts or the host itself +deploy_network_internal: false + +# Prevent containers on this network from reaching containers on other networks +deploy_network_isolate: false + +# Enable IPv6 +deploy_network_ipv6: false + +# DNS servers (optional - uses system defaults if omitted) +# deploy_network_dns: +# - 8.8.8.8 diff --git a/src/roles/deploy_network/tasks/main.yaml b/src/roles/deploy_network/tasks/main.yaml new file mode 100644 index 000000000..d340eb0e8 --- /dev/null +++ b/src/roles/deploy_network/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- 
name: Deploy network + ansible.builtin.include_tasks: podman.yaml diff --git a/src/roles/deploy_network/tasks/podman.yaml b/src/roles/deploy_network/tasks/podman.yaml new file mode 100644 index 000000000..c20e576bb --- /dev/null +++ b/src/roles/deploy_network/tasks/podman.yaml @@ -0,0 +1,12 @@ +--- +- name: Create network {{ deploy_network_name }} + containers.podman.podman_network: + name: "{{ deploy_network_name }}" + driver: "{{ deploy_network_driver }}" + internal: "{{ deploy_network_internal }}" + ipv6: "{{ deploy_network_ipv6 }}" + subnet: "{{ deploy_network_subnet | default(omit) }}" + gateway: "{{ deploy_network_gateway | default(omit) }}" + dns: "{{ deploy_network_dns | default(omit) }}" + opt: "{{ {'isolate': deploy_network_isolate} if deploy_network_isolate else omit }}" + state: present From fe92afa7b0446e5d6f0f43cac209ca2d199e9aae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20M=C3=A9ndez=20Hern=C3=A1ndez?= Date: Sun, 8 Mar 2026 18:37:46 +0100 Subject: [PATCH 2/5] Isolate container services into dedicated bridge networks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace host networking with four named Podman bridge networks: - foreman-db (internal, isolated): PostgreSQL and its clients - foreman-cache (internal, isolated): Redis and its clients - foreman-app: application layer — Foreman, Candlepin, Pulp, httpd - foreman-proxy-net: Foreman and the smart proxy Services reach each other by container DNS name instead of localhost. Per-service notes: - postgresql: also accessible via a Unix socket bind-mounted at /var/run/postgresql, used by Ansible's community.postgresql modules during deployment so no TCP port needs to be published to the host. - redis: publishes no ports; clients reach it by DNS name on foreman-cache. - candlepin: Tomcat binds to 0.0.0.0 so that other containers on the bridge network can reach it via its DNS name. 
- migration containers (foreman db:migrate, pulp migrations): attached to foreman-db when the database is internal; switched to foreman-app when the database is external, because foreman-db uses internal: true which removes the default gateway, making it impossible for containers on that network to reach an external database host. Co-Authored-By: Claude Sonnet 4.6 --- .../remote-database/remote-database.yaml | 3 ++ src/playbooks/deploy/deploy.yaml | 16 +++++++++++ src/roles/candlepin/defaults/main.yml | 5 ++-- src/roles/candlepin/tasks/main.yml | 2 +- src/roles/candlepin/templates/broker.xml.j2 | 2 +- src/roles/foreman/defaults/main.yaml | 2 +- src/roles/foreman/tasks/main.yaml | 22 +++++++++++---- src/roles/foreman/templates/katello.yaml.j2 | 4 +-- src/roles/foreman/templates/settings.yaml.j2 | 2 +- src/roles/foreman_proxy/tasks/main.yaml | 4 ++- src/roles/postgresql/defaults/main.yml | 3 +- src/roles/postgresql/tasks/main.yml | 28 ++++++++++++++----- src/roles/pulp/defaults/main.yaml | 14 ++++++++-- src/roles/pulp/tasks/main.yaml | 12 ++++---- src/roles/redis/defaults/main.yml | 1 + src/roles/redis/tasks/main.yaml | 2 +- src/vars/database.yml | 2 +- tests/candlepin_test.py | 6 ++-- tests/postgresql_test.py | 6 ++-- tests/redis_test.py | 14 +++------- 20 files changed, 103 insertions(+), 47 deletions(-) diff --git a/development/playbooks/remote-database/remote-database.yaml b/development/playbooks/remote-database/remote-database.yaml index 0ea469c1d..8f8db7e6e 100644 --- a/development/playbooks/remote-database/remote-database.yaml +++ b/development/playbooks/remote-database/remote-database.yaml @@ -15,6 +15,9 @@ - role: pre_install - role: certificates - role: postgresql + vars: + postgresql_network: host + postgresql_ports: [] tasks: - name: Fetch PostgreSQL SSL CA diff --git a/src/playbooks/deploy/deploy.yaml b/src/playbooks/deploy/deploy.yaml index 89180f1bc..8fc807a1b 100644 --- a/src/playbooks/deploy/deploy.yaml +++ b/src/playbooks/deploy/deploy.yaml @@ -22,6 
+22,22 @@ certificate_checks_certificate: "{{ server_certificate }}" certificate_checks_key: "{{ server_key }}" certificate_checks_ca: "{{ ca_certificate }}" + - role: deploy_network + vars: + deploy_network_name: foreman-db + deploy_network_internal: true + deploy_network_isolate: true + - role: deploy_network + vars: + deploy_network_name: foreman-cache + deploy_network_internal: true + deploy_network_isolate: true + - role: deploy_network + vars: + deploy_network_name: foreman-app + - role: deploy_network + vars: + deploy_network_name: foreman-proxy-net - role: postgresql when: - database_mode == 'internal' diff --git a/src/roles/candlepin/defaults/main.yml b/src/roles/candlepin/defaults/main.yml index a0a8b15b4..e5419f438 100644 --- a/src/roles/candlepin/defaults/main.yml +++ b/src/roles/candlepin/defaults/main.yml @@ -1,6 +1,6 @@ --- candlepin_ssl_port: 23443 -candlepin_hostname: localhost +candlepin_hostname: "0.0.0.0" candlepin_tls_versions: - "TLSv1.2" candlepin_ciphers: @@ -15,8 +15,9 @@ candlepin_ciphers: candlepin_container_image: quay.io/foreman/candlepin candlepin_container_tag: "4.4.14" candlepin_registry_auth_file: /etc/foreman/registry-auth.json +candlepin_networks: "{{ (['foreman-db'] if database_mode == 'internal' else []) + ['foreman-app'] }}" -candlepin_database_host: localhost +candlepin_database_host: postgresql candlepin_database_port: 5432 candlepin_database_ssl: false candlepin_database_ssl_mode: disable diff --git a/src/roles/candlepin/tasks/main.yml b/src/roles/candlepin/tasks/main.yml index 4b2628138..f216cb194 100644 --- a/src/roles/candlepin/tasks/main.yml +++ b/src/roles/candlepin/tasks/main.yml @@ -71,7 +71,7 @@ name: "candlepin" image: "{{ candlepin_container_image }}:{{ candlepin_container_tag }}" state: quadlet - network: host + network: "{{ candlepin_networks }}" hostname: "{{ ansible_facts['hostname'] }}.local" secrets: - 'candlepin-ca-cert,target=/etc/candlepin/certs/candlepin-ca.crt,mode=0440,type=mount' diff --git 
a/src/roles/candlepin/templates/broker.xml.j2 b/src/roles/candlepin/templates/broker.xml.j2 index 3a247b9c2..3c941db2b 100644 --- a/src/roles/candlepin/templates/broker.xml.j2 +++ b/src/roles/candlepin/templates/broker.xml.j2 @@ -10,7 +10,7 @@ vm://0 - tcp://localhost:61613?protocols=STOMP;useEpoll=false;sslEnabled=true;trustStorePath=/etc/candlepin/certs/truststore;trustStorePassword={{ candlepin_keystore_password }};keyStorePath=/etc/candlepin/certs/keystore;keyStorePassword={{ candlepin_keystore_password }};needClientAuth=true + tcp://{{ candlepin_hostname }}:61613?protocols=STOMP;useEpoll=false;sslEnabled=true;trustStorePath=/etc/candlepin/certs/truststore;trustStorePassword={{ candlepin_keystore_password }};keyStorePath=/etc/candlepin/certs/keystore;keyStorePassword={{ candlepin_keystore_password }};needClientAuth=true true diff --git a/src/roles/foreman/defaults/main.yaml b/src/roles/foreman/defaults/main.yaml index 419f4efec..1a93085ae 100644 --- a/src/roles/foreman/defaults/main.yaml +++ b/src/roles/foreman/defaults/main.yaml @@ -5,7 +5,7 @@ foreman_registry_auth_file: /etc/foreman/registry-auth.json foreman_database_name: foreman foreman_database_user: foreman -foreman_database_host: localhost +foreman_database_host: postgresql foreman_database_port: 5432 foreman_database_pool: 9 foreman_database_ssl_mode: disable diff --git a/src/roles/foreman/tasks/main.yaml b/src/roles/foreman/tasks/main.yaml index 43139ead5..befeb3411 100644 --- a/src/roles/foreman/tasks/main.yaml +++ b/src/roles/foreman/tasks/main.yaml @@ -101,7 +101,13 @@ image: "{{ foreman_container_image }}:{{ foreman_container_tag }}" state: quadlet sdnotify: true - network: host + network: + - foreman-db + - foreman-cache + - foreman-app + - foreman-proxy-net + ports: + - "127.0.0.1:3000:3000" hostname: "{{ ansible_facts['hostname'] }}.local" volume: - 'foreman-data-run:/var/run/foreman:z' @@ -137,7 +143,10 @@ image: "{{ foreman_container_image }}:{{ foreman_container_tag }}" state: quadlet 
sdnotify: true - network: host + network: + - foreman-db + - foreman-cache + - foreman-app hostname: "{{ ansible_facts['hostname'] }}.local" volume: - 'foreman-data-run:/var/run/foreman:z' @@ -151,7 +160,7 @@ - 'foreman-dynflow-worker-hosts-queue-yaml,type=mount,target=/etc/foreman/dynflow/worker-hosts-queue.yml' - 'foreman-db-ca,type=mount,target={{ foreman_database_ssl_ca_path }}' env: - DYNFLOW_REDIS_URL: "redis://localhost:6379/6" + DYNFLOW_REDIS_URL: "redis://redis:6379/6" REDIS_PROVIDER: "DYNFLOW_REDIS_URL" FOREMAN_ENABLED_PLUGINS: "{{ foreman_plugins | join(' ') }}" command: "/usr/libexec/foreman/sidekiq-selinux -e production -r ./extras/dynflow-sidekiq.rb -C /etc/foreman/dynflow/%i.yml" @@ -189,7 +198,10 @@ state: quadlet image: "{{ foreman_container_image }}:{{ foreman_container_tag }}" sdnotify: false - network: host + network: + - foreman-db + - foreman-cache + - foreman-app hostname: "{{ ansible_facts['hostname'] }}.local" command: "foreman-rake {{ item.rake }}" volume: @@ -239,7 +251,7 @@ - bin/rails db:migrate && bin/rails db:seed detach: false rm: true - network: host + network: "{{ ['foreman-db'] if database_mode == 'internal' else ['foreman-app'] }}" env: FOREMAN_ENABLED_PLUGINS: "{{ foreman_plugins | join(' ') }}" secrets: diff --git a/src/roles/foreman/templates/katello.yaml.j2 b/src/roles/foreman/templates/katello.yaml.j2 index 668d2cf7b..8f005cff4 100644 --- a/src/roles/foreman/templates/katello.yaml.j2 +++ b/src/roles/foreman/templates/katello.yaml.j2 @@ -3,13 +3,13 @@ :rest_client_timeout: 3600 :candlepin: - :url: https://localhost:23443/candlepin + :url: https://candlepin:23443/candlepin :oauth_key: "katello" :oauth_secret: "{{ candlepin_oauth_secret }}" :ca_cert_file: /etc/foreman/katello-default-ca.crt :candlepin_events: - :broker_host: localhost + :broker_host: candlepin :ssl_cert_file: /etc/foreman/client_cert.pem :ssl_key_file: /etc/foreman/client_key.pem :ssl_ca_file: /etc/foreman/katello-default-ca.crt diff --git 
a/src/roles/foreman/templates/settings.yaml.j2 b/src/roles/foreman/templates/settings.yaml.j2 index 9c6ef2197..288643b92 100644 --- a/src/roles/foreman/templates/settings.yaml.j2 +++ b/src/roles/foreman/templates/settings.yaml.j2 @@ -10,7 +10,7 @@ :rails_cache_store: :type: redis :urls: - - redis://localhost:6379/4 + - redis://redis:6379/4 :options: :compress: true :namespace: foreman diff --git a/src/roles/foreman_proxy/tasks/main.yaml b/src/roles/foreman_proxy/tasks/main.yaml index 236b9377f..4ed5c9bc9 100644 --- a/src/roles/foreman_proxy/tasks/main.yaml +++ b/src/roles/foreman_proxy/tasks/main.yaml @@ -18,7 +18,9 @@ image: "{{ foreman_proxy_container_image }}:{{ foreman_proxy_container_tag }}" state: quadlet sdnotify: true - network: host + network: foreman-proxy-net + ports: + - "0.0.0.0:8443:8443" hostname: "{{ ansible_facts['hostname'] }}.local" secrets: - 'foreman-proxy-settings-yml,type=mount,target=/etc/foreman-proxy/settings.yml' diff --git a/src/roles/postgresql/defaults/main.yml b/src/roles/postgresql/defaults/main.yml index 7c80c3a68..645ee5538 100644 --- a/src/roles/postgresql/defaults/main.yml +++ b/src/roles/postgresql/defaults/main.yml @@ -3,7 +3,8 @@ postgresql_container_image: quay.io/sclorg/postgresql-13-c9s postgresql_container_tag: "latest" postgresql_registry_auth_file: /etc/foreman/registry-auth.json postgresql_container_name: postgresql -postgresql_network: host +postgresql_network: foreman-db +postgresql_socket_dir: /run/postgresql postgresql_restart_policy: always postgresql_data_dir: /var/lib/pgsql/data diff --git a/src/roles/postgresql/tasks/main.yml b/src/roles/postgresql/tasks/main.yml index 9168d9075..584cb28b1 100644 --- a/src/roles/postgresql/tasks/main.yml +++ b/src/roles/postgresql/tasks/main.yml @@ -14,6 +14,19 @@ owner: 26 group: 26 +- name: Install tmpfiles.d entry for PostgreSQL socket directory + ansible.builtin.copy: + dest: /usr/lib/tmpfiles.d/foremanctl-postgresql.conf + content: "d {{ postgresql_socket_dir }} 0755 26 26 
-\n" + mode: "0644" + owner: root + group: root + +- name: Create PostgreSQL socket directory + ansible.builtin.command: systemd-tmpfiles --create /usr/lib/tmpfiles.d/foremanctl-postgresql.conf + args: + creates: "{{ postgresql_socket_dir }}" + - name: Create Podman secret for PostgreSQL admin password containers.podman.podman_secret: name: postgresql-admin-password @@ -28,9 +41,10 @@ state: quadlet healthcheck: pg_isready sdnotify: healthy - network: host + network: "{{ postgresql_network }}" volumes: - "{{ postgresql_data_dir }}:/var/lib/pgsql/data:Z" + - "{{ postgresql_socket_dir }}:{{ postgresql_socket_dir }}:Z" secrets: - 'postgresql-admin-password,target=POSTGRESQL_ADMIN_PASSWORD,type=env' env: @@ -114,20 +128,21 @@ - name: Use scram-sha-256 for password encryption community.postgresql.postgresql_set: login_user: postgres - login_password: "{{ postgresql_admin_password }}" - login_host: localhost + login_unix_socket: "{{ postgresql_socket_dir }}" name: password_encryption value: "scram-sha-256" notify: - Restart postgresql +- name: Flush handlers to apply password encryption change + ansible.builtin.meta: flush_handlers + - name: Create PostgreSQL users community.postgresql.postgresql_user: name: "{{ item.name }}" password: "{{ item.password }}" login_user: postgres - login_password: "{{ postgresql_admin_password }}" - login_host: localhost + login_unix_socket: "{{ postgresql_socket_dir }}" role_attr_flags: "{{ item.role_attr_flags | default(omit) }}" state: present loop: "{{ postgresql_users }}" @@ -138,7 +153,6 @@ name: "{{ item.name }}" owner: "{{ item.owner }}" login_user: postgres - login_password: "{{ postgresql_admin_password }}" - login_host: localhost + login_unix_socket: "{{ postgresql_socket_dir }}" state: present loop: "{{ postgresql_databases }}" diff --git a/src/roles/pulp/defaults/main.yaml b/src/roles/pulp/defaults/main.yaml index c42995503..8e016bd73 100644 --- a/src/roles/pulp/defaults/main.yaml +++ b/src/roles/pulp/defaults/main.yaml @@ 
-33,7 +33,17 @@ pulp_enabled_plugins: "{{ pulp_default_plugins + pulp_plugins }}" pulp_database_name: pulp pulp_database_user: pulp -pulp_database_host: localhost +pulp_database_host: postgresql +pulp_redis_url: "redis://redis:6379/8" +pulp_networks: + - foreman-db + - foreman-cache + - foreman-app +pulp_api_ports: + - "127.0.0.1:24817:24817" +pulp_content_ports: + - "127.0.0.1:24816:24816" +pulp_migration_networks: "{{ ['foreman-db'] if database_mode == 'internal' else ['foreman-app'] }}" pulp_database_port: 5432 pulp_database_ssl_mode: disabled pulp_database_ssl_ca: @@ -57,7 +67,7 @@ pulp_settings_other_env: ['pulpcore.app.authentication.PulpNoCreateRemoteUserBackend'] PULP_CACHE_ENABLED: "true" PULP_CONTENT_ORIGIN: "{{ pulp_content_origin }}" - PULP_REDIS_URL: "redis://localhost:6379/8" + PULP_REDIS_URL: "{{ pulp_redis_url }}" PULP_REMOTE_USER_ENVIRON_NAME: "HTTP_REMOTE_USER" PULP_REST_FRAMEWORK__DEFAULT_AUTHENTICATION_CLASSES: >- ['rest_framework.authentication.SessionAuthentication', 'pulpcore.app.authentication.PulpRemoteUserAuthentication'] diff --git a/src/roles/pulp/tasks/main.yaml b/src/roles/pulp/tasks/main.yaml index 66dcad042..a8ab03218 100644 --- a/src/roles/pulp/tasks/main.yaml +++ b/src/roles/pulp/tasks/main.yaml @@ -101,7 +101,8 @@ state: quadlet sdnotify: true command: pulp-api - network: host + network: "{{ pulp_networks }}" + ports: "{{ pulp_api_ports | default(omit) }}" hostname: "pulp-api.{{ ansible_facts['hostname'] }}.local" volumes: "{{ pulp_volumes }}" security_opt: @@ -132,7 +133,8 @@ state: quadlet sdnotify: true command: pulp-content - network: host + network: "{{ pulp_networks }}" + ports: "{{ pulp_content_ports | default(omit) }}" hostname: "pulp-content.{{ ansible_facts['hostname'] }}.local" volumes: "{{ pulp_volumes }}" security_opt: @@ -163,7 +165,7 @@ image: "{{ pulp_worker_image }}" state: quadlet command: pulp-worker - network: host + network: "{{ pulp_networks }}" hostname: "pulp-worker-%i.{{ ansible_facts['hostname'] }}.local" 
volumes: "{{ pulp_volumes }}" security_opt: @@ -218,7 +220,7 @@ command: pulpcore-manager migrate --noinput detach: false rm: true - network: host + network: "{{ pulp_migration_networks }}" volumes: "{{ pulp_volumes }}" secrets: - 'pulp-symmetric-key,type=mount,target=/etc/pulp/certs/database_fields.symmetric.key' @@ -233,7 +235,7 @@ command: pulpcore-manager reset-admin-password --random detach: false rm: true - network: host + network: "{{ pulp_migration_networks }}" volumes: "{{ pulp_volumes }}" secrets: - 'pulp-symmetric-key,type=mount,target=/etc/pulp/certs/database_fields.symmetric.key' diff --git a/src/roles/redis/defaults/main.yml b/src/roles/redis/defaults/main.yml index 1b0e2af3f..d3509e2f9 100644 --- a/src/roles/redis/defaults/main.yml +++ b/src/roles/redis/defaults/main.yml @@ -2,3 +2,4 @@ redis_container_image: quay.io/sclorg/redis-6-c9s redis_container_tag: "latest" redis_registry_auth_file: /etc/foreman/registry-auth.json +redis_network: foreman-cache diff --git a/src/roles/redis/tasks/main.yaml b/src/roles/redis/tasks/main.yaml index 93837c90c..bd860bb58 100644 --- a/src/roles/redis/tasks/main.yaml +++ b/src/roles/redis/tasks/main.yaml @@ -19,9 +19,9 @@ name: redis image: "{{ redis_container_image }}:{{ redis_container_tag }}" state: quadlet - network: host sdnotify: true command: ["run-redis", "--supervised", "systemd"] + network: "{{ redis_network }}" volumes: - /var/lib/redis:/data:Z quadlet_options: diff --git a/src/vars/database.yml b/src/vars/database.yml index 3f4a73cd1..d0457d17b 100644 --- a/src/vars/database.yml +++ b/src/vars/database.yml @@ -1,5 +1,5 @@ --- -database_host: localhost +database_host: postgresql database_port: 5432 database_ssl_mode: disable database_ssl_ca: diff --git a/tests/candlepin_test.py b/tests/candlepin_test.py index dc78faa8e..bff16fef2 100644 --- a/tests/candlepin_test.py +++ b/tests/candlepin_test.py @@ -18,7 +18,7 @@ def test_candlepin_port(server): def test_candlepin_status(server, certificates): - status = 
server.run(f"curl --cacert {certificates['ca_certificate']} --silent --output /dev/null --write-out '%{{http_code}}' https://localhost:23443/candlepin/status") + status = server.run(f"curl --cacert {certificates['ca_certificate']} --resolve candlepin:23443:127.0.0.1 --silent --output /dev/null --write-out '%{{http_code}}' https://candlepin:23443/candlepin/status") assert status.succeeded assert status.stdout == '200' @@ -29,7 +29,7 @@ def test_artemis_port(server): def test_artemis_auth(server, certificates): - cmd = server.run(f'echo "" | openssl s_client -CAfile {certificates["ca_certificate"]} -cert {certificates["client_certificate"]} -key {certificates["client_key"]} -connect localhost:61613') + cmd = server.run(f'echo "" | openssl s_client -CAfile {certificates["ca_certificate"]} -cert {certificates["client_certificate"]} -key {certificates["client_key"]} -connect 127.0.0.1:61613 -servername candlepin') assert cmd.succeeded, f"exit: {cmd.rc}\n\nstdout:\n{cmd.stdout}\n\nstderr:\n{cmd.stderr}" @@ -40,7 +40,7 @@ def test_certs_users_file(server, certificates): def test_tls(server): - result = server.run('nmap --script +ssl-enum-ciphers localhost -p 23443') + result = server.run('nmap -sT --script +ssl-enum-ciphers localhost -p 23443') result = result.stdout # We don't enable TLSv1.3 by default yet. 
TLSv1.3 support was added in tomcat 7.0.92 # But tomcat 7.0.76 is the latest version available on EL7 diff --git a/tests/postgresql_test.py b/tests/postgresql_test.py index fa84f6d0b..e43cfab7a 100644 --- a/tests/postgresql_test.py +++ b/tests/postgresql_test.py @@ -7,9 +7,9 @@ def test_postgresql_service(database): assert postgresql.is_running -def test_postgresql_port(database): - postgresql = database.addr("localhost") - assert postgresql.port("5432").is_reachable +def test_postgresql_socket(database): + socket = database.file("/var/run/postgresql/.s.PGSQL.5432") + assert socket.exists def test_postgresql_databases(database): diff --git a/tests/redis_test.py b/tests/redis_test.py index c612ba035..4aaa5e49d 100644 --- a/tests/redis_test.py +++ b/tests/redis_test.py @@ -1,15 +1,9 @@ -import pytest - - -REDIS_HOST = 'localhost' -REDIS_PORT = 6379 - - def test_redis_service(server): redis = server.service("redis") assert redis.is_running -def test_redis_port(server): - redis = server.addr(REDIS_HOST) - assert redis.port(REDIS_PORT).is_reachable +def test_redis_ping(server): + result = server.run("podman exec redis redis-cli ping") + assert result.succeeded + assert result.stdout.strip() == "PONG" From b82ab60c9502cd11178b6172a3cc2be956682226 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20M=C3=A9ndez=20Hern=C3=A1ndez?= Date: Sun, 8 Mar 2026 17:52:22 +0100 Subject: [PATCH 3/5] Use host networking in the development environment The development setup runs the Foreman Rails process directly on the host rather than in a container, so it cannot resolve container bridge DNS names such as postgresql or candlepin. Keeping host networking for all services in the devel environment means every service remains reachable on localhost as before, without requiring per-service URL overrides for the Rails process. 
Co-Authored-By: Claude Sonnet 4.6 --- development/playbooks/deploy-dev/deploy-dev.yaml | 13 +++++++++++++ .../roles/foreman_development/tasks/main.yaml | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/development/playbooks/deploy-dev/deploy-dev.yaml b/development/playbooks/deploy-dev/deploy-dev.yaml index 738381cd4..7b6c8b728 100644 --- a/development/playbooks/deploy-dev/deploy-dev.yaml +++ b/development/playbooks/deploy-dev/deploy-dev.yaml @@ -17,6 +17,7 @@ - role: certificates - role: postgresql vars: + postgresql_network: host postgresql_databases: - name: "{{ candlepin_database_name }}" owner: "{{ candlepin_database_user }}" @@ -35,9 +36,21 @@ - name: "{{ pulp_database_user }}" password: "{{ pulp_database_password }}" - role: redis + vars: + redis_network: host - role: candlepin + vars: + candlepin_networks: host + candlepin_database_host: localhost - role: httpd - role: pulp + vars: + pulp_networks: host + pulp_migration_networks: host + pulp_database_host: localhost + pulp_redis_url: "redis://localhost:6379/8" + pulp_api_ports: [] + pulp_content_ports: [] - role: foreman_development post_tasks: - name: Display development environment information diff --git a/development/roles/foreman_development/tasks/main.yaml b/development/roles/foreman_development/tasks/main.yaml index 2691b9bda..96a23d967 100644 --- a/development/roles/foreman_development/tasks/main.yaml +++ b/development/roles/foreman_development/tasks/main.yaml @@ -222,10 +222,10 @@ ansible.builtin.uri: url: '{{ foreman_development_url }}/api/v2/ping' validate_certs: false + register: foreman_development_status until: foreman_development_status.status == 200 retries: 30 delay: 5 - register: foreman_development_status - name: Configure Foreman Proxy for Pulp theforeman.foreman.smart_proxy: From 0d376c1e9348fe07f9a6816282bdd9783fef97a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20M=C3=A9ndez=20Hern=C3=A1ndez?= Date: Sun, 8 Mar 2026 23:46:07 +0100 Subject: [PATCH 4/5] Issue a 
dedicated TLS certificate for the Candlepin container hostname MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit foreman-certs generates certificates only for localhost and the server FQDN. Now that Foreman connects to Candlepin via the bridge DNS name "candlepin", TLS hostname validation fails against those certificates. A dedicated certificate with SAN=candlepin, signed by the installer CA, is generated using the openssl CLI (consistent with the rest of the codebase). The certificate validity is set to 7300 days (20 years), matching the default used by puppet-certs (theforeman/puppet-certs manifests/init.pp $expiration parameter). The signing step runs on every deployment to ensure the certificate is always freshly dated, consistent with how foreman-certs handles the other installer certificates. The localhost certificate previously used by Candlepin's Tomcat is now unused and removed: - "localhost" dropped from certificates_hostnames — the certificates role no longer generates the cert. - localhost_key / localhost_certificate removed from default_certificates.yml and installer_certificates.yml. The healthcheck is updated to validate the new certificate instead of skipping verification with --insecure. --resolve candlepin:23443:127.0.0.1 forces the connection to the loopback so it works in both bridge networking (where other containers reach Candlepin by DNS name) and host networking (where "candlepin" would not resolve via container DNS). Tests are updated to route connectivity checks through the foreman container (which shares the same bridge network as candlepin), to verify against the new certificate, and to check candlepin_certificate expiry instead of the now-removed localhost_certificate. 
Co-Authored-By: Claude Sonnet 4.6 --- .../foreman_installer_certs/tasks/main.yml | 44 +++++++++++++++++++ src/roles/candlepin/tasks/main.yml | 2 +- src/vars/base.yaml | 6 +-- src/vars/default_certificates.yml | 4 +- src/vars/installer_certificates.yml | 4 +- tests/candlepin_test.py | 41 +++++------------ tests/certificates_test.py | 2 +- 7 files changed, 65 insertions(+), 38 deletions(-) diff --git a/development/roles/foreman_installer_certs/tasks/main.yml b/development/roles/foreman_installer_certs/tasks/main.yml index bf4c0f0d9..f96091091 100644 --- a/development/roles/foreman_installer_certs/tasks/main.yml +++ b/development/roles/foreman_installer_certs/tasks/main.yml @@ -14,3 +14,47 @@ - name: Generate certs ansible.builtin.command: foreman-certs --apache true --foreman true --candlepin true changed_when: false + +# foreman-certs only generates certs for localhost and the server FQDN. Candlepin +# runs in its own container and is reachable by other containers via the DNS name +# "candlepin" on the bridge network. Foreman validates the TLS certificate hostname +# when connecting to https://candlepin:23443, so a dedicated cert with SAN=candlepin +# is required. 
+- name: Create candlepin cert directory + ansible.builtin.file: + path: /root/ssl-build/candlepin + state: directory + mode: '0755' + +- name: Generate candlepin Tomcat private key + ansible.builtin.command: > + openssl genrsa + -out /root/ssl-build/candlepin/candlepin-tomcat.key + 4096 + args: + creates: /root/ssl-build/candlepin/candlepin-tomcat.key + +- name: Generate candlepin Tomcat certificate signing request + ansible.builtin.command: > + openssl req + -new + -key /root/ssl-build/candlepin/candlepin-tomcat.key + -subj "/CN=candlepin" + -addext "subjectAltName = DNS:candlepin" + -out /root/ssl-build/candlepin/candlepin-tomcat.csr + args: + creates: /root/ssl-build/candlepin/candlepin-tomcat.csr + +- name: Sign candlepin certificate with installer CA + ansible.builtin.command: > + openssl x509 + -req + -in /root/ssl-build/candlepin/candlepin-tomcat.csr + -CA /root/ssl-build/katello-default-ca.crt + -CAkey /root/ssl-build/katello-default-ca.key + -CAcreateserial + -days 7300 + -passin "file:/root/ssl-build/katello-default-ca.pwd" + -copy_extensions copy + -out /root/ssl-build/candlepin/candlepin-tomcat.crt + changed_when: true diff --git a/src/roles/candlepin/tasks/main.yml b/src/roles/candlepin/tasks/main.yml index f216cb194..c6e4c43a8 100644 --- a/src/roles/candlepin/tasks/main.yml +++ b/src/roles/candlepin/tasks/main.yml @@ -100,7 +100,7 @@ After=redis.service postgresql.service [Service] TimeoutStartSec=300 - healthcheck: curl --fail --insecure https://localhost:23443/candlepin/status + healthcheck: curl --fail --cacert /etc/candlepin/certs/candlepin-ca.crt --resolve candlepin:23443:127.0.0.1 https://candlepin:23443/candlepin/status sdnotify: healthy - name: Run daemon reload to make Quadlet create the service files diff --git a/src/vars/base.yaml b/src/vars/base.yaml index 16765f9a7..108dd0a9f 100644 --- a/src/vars/base.yaml +++ b/src/vars/base.yaml @@ -1,7 +1,7 @@ --- certificates_hostnames: - "{{ ansible_facts['fqdn'] }}" - - localhost + - candlepin 
certificates_ca_password: "CHANGEME" candlepin_keystore_password: "CHANGEME" @@ -10,8 +10,8 @@ candlepin_oauth_secret: "CHANGEME" candlepin_ca_key_password: "{{ ca_key_password }}" candlepin_ca_key: "{{ ca_key }}" candlepin_ca_certificate: "{{ ca_certificate }}" -candlepin_tomcat_key: "{{ localhost_key }}" -candlepin_tomcat_certificate: "{{ localhost_certificate }}" +candlepin_tomcat_key: "{{ candlepin_key }}" +candlepin_tomcat_certificate: "{{ candlepin_certificate }}" candlepin_client_key: "{{ client_key }}" candlepin_client_certificate: "{{ client_certificate }}" diff --git a/src/vars/default_certificates.yml b/src/vars/default_certificates.yml index 09f47c5c9..4a5ac10b7 100644 --- a/src/vars/default_certificates.yml +++ b/src/vars/default_certificates.yml @@ -9,5 +9,5 @@ server_ca_certificate: "{{ certificates_ca_directory }}/certs/ca.crt" client_certificate: "{{ certificates_ca_directory }}/certs/{{ ansible_facts['fqdn'] }}-client.crt" client_key: "{{ certificates_ca_directory }}/private/{{ ansible_facts['fqdn'] }}-client.key" client_ca_certificate: "{{ certificates_ca_directory }}/certs/ca.crt" -localhost_key: "{{ certificates_ca_directory }}/private/localhost.key" -localhost_certificate: "{{ certificates_ca_directory }}/certs/localhost.crt" +candlepin_key: "{{ certificates_ca_directory }}/private/candlepin.key" +candlepin_certificate: "{{ certificates_ca_directory }}/certs/candlepin.crt" diff --git a/src/vars/installer_certificates.yml b/src/vars/installer_certificates.yml index c6ab83af3..a0f361153 100644 --- a/src/vars/installer_certificates.yml +++ b/src/vars/installer_certificates.yml @@ -8,5 +8,5 @@ server_ca_certificate: "/root/ssl-build/katello-server-ca.crt" client_certificate: "/root/ssl-build/{{ ansible_facts['fqdn'] }}/{{ ansible_facts['fqdn'] }}-foreman-client.crt" client_key: "/root/ssl-build/{{ ansible_facts['fqdn'] }}/{{ ansible_facts['fqdn'] }}-foreman-client.key" client_ca_certificate: "{{ ca_certificate }}" -localhost_key: 
"/root/ssl-build/localhost/localhost-tomcat.key" -localhost_certificate: "/root/ssl-build/localhost/localhost-tomcat.crt" +candlepin_key: "/root/ssl-build/candlepin/candlepin-tomcat.key" +candlepin_certificate: "/root/ssl-build/candlepin/candlepin-tomcat.crt" diff --git a/tests/candlepin_test.py b/tests/candlepin_test.py index bff16fef2..64bd72515 100644 --- a/tests/candlepin_test.py +++ b/tests/candlepin_test.py @@ -1,5 +1,3 @@ -import re - def assert_secret_content(server, secret_name, secret_value): secret = server.run(f'podman secret inspect --format {"{{.SecretData}}"} --showsecret {secret_name}') @@ -12,24 +10,14 @@ def test_candlepin_service(server): assert candlepin.is_running -def test_candlepin_port(server): - candlepin = server.addr("localhost") - assert candlepin.port("23443").is_reachable - - -def test_candlepin_status(server, certificates): - status = server.run(f"curl --cacert {certificates['ca_certificate']} --resolve candlepin:23443:127.0.0.1 --silent --output /dev/null --write-out '%{{http_code}}' https://candlepin:23443/candlepin/status") +def test_candlepin_status(server): + status = server.run("podman exec foreman curl --cacert /etc/foreman/katello-default-ca.crt --silent --output /dev/null --write-out '%{http_code}' https://candlepin:23443/candlepin/status") assert status.succeeded assert status.stdout == '200' -def test_artemis_port(server): - candlepin = server.addr("localhost") - assert candlepin.port("61613").is_reachable - - -def test_artemis_auth(server, certificates): - cmd = server.run(f'echo "" | openssl s_client -CAfile {certificates["ca_certificate"]} -cert {certificates["client_certificate"]} -key {certificates["client_key"]} -connect 127.0.0.1:61613 -servername candlepin') +def test_artemis_auth(server): + cmd = server.run('podman exec foreman bash -c \'echo "" | openssl s_client -CAfile /etc/foreman/katello-default-ca.crt -cert /etc/foreman/client_cert.pem -key /etc/foreman/client_key.pem -connect candlepin:61613 -servername 
candlepin\'')
     assert cmd.succeeded, f"exit: {cmd.rc}\n\nstdout:\n{cmd.stdout}\n\nstderr:\n{cmd.stderr}"
 
 
@@ -40,21 +28,16 @@ def test_certs_users_file(server, certificates):
 
 
 def test_tls(server):
-    result = server.run('nmap -sT --script +ssl-enum-ciphers localhost -p 23443')
-    result = result.stdout
-    # We don't enable TLSv1.3 by default yet. TLSv1.3 support was added in tomcat 7.0.92
-    # But tomcat 7.0.76 is the latest version available on EL7
-    assert "TLSv1.3" not in result
-
-    # Test that TLSv1.2 is enabled
-    assert "TLSv1.2" in result
+    ca = '/etc/foreman/katello-default-ca.crt'
 
-    # Test that older TLS versions are disabled
-    assert "TLSv1.1" not in result
-    assert "TLSv1.0" not in result
+    # TLSv1.2 should be enabled; check rc too, as a failed handshake still prints "Cipher is (NONE)"
+    result = server.run(f'podman exec foreman bash -c "echo Q | openssl s_client -connect candlepin:23443 -tls1_2 -CAfile {ca} 2>&1"')
+    assert result.rc == 0 and "Cipher is (NONE)" not in result.stdout, f"TLSv1.2 not available:\n{result.stdout}"
 
-    # Test that the least cipher strength is "strong" or "A"
-    assert "least strength: A" in result
+    # TLSv1.3, TLSv1.1 and TLSv1.0 should be disabled
+    for flag in ['-tls1_3', '-tls1_1', '-tls1']:
+        result = server.run(f'podman exec foreman bash -c "echo Q | openssl s_client -connect candlepin:23443 {flag} -CAfile {ca} 2>&1"')
+        assert result.rc != 0, f"TLS version ({flag}) should be disabled:\n{result.stdout}"
 
 
 def test_cert_roles(server):
diff --git a/tests/certificates_test.py b/tests/certificates_test.py
index 2933a861c..9947b029f 100644
--- a/tests/certificates_test.py
+++ b/tests/certificates_test.py
@@ -6,7 +6,7 @@ def certificate_info(server, certificate):
     openssl_result = server.run(f"openssl x509 -in {certificate} -noout -enddate -dateopt iso_8601 -subject -issuer")
     return dict([x.split('=', 1) for x in openssl_result.stdout.splitlines()])
 
-@pytest.mark.parametrize("certificate_type", ['ca_certificate', 'server_certificate', 'client_certificate', 'localhost_certificate'])
+@pytest.mark.parametrize("certificate_type", 
['ca_certificate', 'server_certificate', 'client_certificate', 'candlepin_certificate']) def test_certificate_expiry(server, certificates, certificate_type): openssl_data = certificate_info(server, certificates[certificate_type]) not_after = dateutil.parser.parse(openssl_data['notAfter']) From f48bf127276f416434a4d57353dfee0d5e44d61f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20M=C3=A9ndez=20Hern=C3=A1ndez?= Date: Sun, 8 Mar 2026 17:42:12 +0100 Subject: [PATCH 5/5] Document container networking architecture Adds docs/deployment.md with a description of the four bridge networks introduced by this series, their properties (internal, isolate), which containers are attached to each, and the rationale for the port publishing decisions. Co-Authored-By: Claude Sonnet 4.6 --- docs/deployment.md | 92 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) diff --git a/docs/deployment.md b/docs/deployment.md index 376138a77..a1866b78a 100644 --- a/docs/deployment.md +++ b/docs/deployment.md @@ -236,3 +236,95 @@ As `foremanctl` is Ansible-based, this means that the ["control node"](https://d To simplify the "install `foremanctl`" step, our test infrastructure uses different systems for the "control node" (the system the source code is cloned to) and the "target node" (the VM created by our development tooling). There is a desire to allow deployments where a single `foremanctl` control node manages multiple managed nodes, but no code exists yet for this. + +## Container Networking + +All containers are connected to one or more named Podman bridge networks instead of sharing the host network namespace, limiting lateral movement: a container can only reach the services it is explicitly connected to. + +### Networks + +#### `foreman-db` + +**Properties:** `internal: true`, `isolate: true` + +The database network. Only containers that need to read or write persistent data are attached. 
+ +- `internal: true` removes the default gateway, so no container on this network can initiate outbound internet connections. Database servers have no reason to reach the internet, and clients that need internet access (e.g. for content sync) are multi-homed and use a different network for that. +- `isolate: true` prevents containers on this network from forwarding packets to containers on other bridge networks, closing off lateral movement paths between network segments. + +| Container | Role | +|-----------|------| +| `postgresql` | Server — listens on port 5432 (internal DB only) | +| `foreman` | Client | +| `dynflow-sidekiq@*` | Client | +| `foreman-recurring@*` | Client | +| `candlepin` | Client (internal DB only) | +| `pulp-api` | Client | +| `pulp-content` | Client | +| `pulp-worker@*` | Client | + +Ansible's `community.postgresql.*` modules reach the database during deployment via a Unix socket: `/var/run/postgresql` is bind-mounted from the host into the container so the socket is accessible on the host without publishing a TCP port. + +#### `foreman-cache` + +**Properties:** `internal: true`, `isolate: true` + +The cache network. Only containers that need to reach Redis are attached. The same rationale as `foreman-db` applies: cache servers have no business reaching the internet, and the `isolate` flag prevents bridge pivoting. + +| Container | Role | +|-----------|------| +| `redis` | Server — listens on port 6379 | +| `foreman` | Client — app cache and Dynflow queue | +| `dynflow-sidekiq@*` | Client — job queue | +| `foreman-recurring@*` | Client — job queue | +| `pulp-api` | Client | +| `pulp-content` | Client | +| `pulp-worker@*` | Client | + +Redis does not publish any port to the host: it is a purely internal service with no legitimate consumers outside the container network. + +#### `foreman-app` + +**Properties:** none (`internal: false`, `isolate: false`) + +The application network. 
Containers that need to communicate with each other at the application layer, or that need outbound internet access (e.g. for content synchronisation), are attached here. + +| Container | Role | +|-----------|------| +| `candlepin` | Server — Tomcat (23443) and Artemis STOMP broker (61613) | +| `foreman` | Client to Candlepin; serves Foreman Proxy requests | +| `dynflow-sidekiq@*` | Client | +| `foreman-recurring@*` | Client | +| `pulp-api` | Server — API (24817); needs internet for content sync | +| `pulp-content` | Server — content (24816); needs internet for content sync | +| `pulp-worker@*` | Worker — needs internet for content sync | + +Candlepin does not publish any ports to the host: `foreman` reaches it directly over the bridge using its DNS name. `foreman`, `pulp-api`, and `pulp-content` publish their respective ports to `127.0.0.1` so that the `httpd` reverse proxy running on the host can reach them. + +#### `foreman-proxy-net` + +**Properties:** none (`internal: false`, `isolate: false`) + +The proxy network, used exclusively for communication between Foreman and Foreman Proxy. Keeping this traffic on a dedicated network makes it straightforward to apply stricter controls in future without affecting the rest of the application. + +| Container | Role | +|-----------|------| +| `foreman-proxy` | Server — listens on 0.0.0.0:8443 (external) | +| `foreman` | Client | + +`foreman-proxy` publishes port `0.0.0.0:8443` so that remote Foreman Proxies and clients can register and communicate with it from outside the host. 
+ +### Network membership summary + +| Container | foreman-db | foreman-cache | foreman-app | foreman-proxy-net | +|-----------|:----------:|:-------------:|:-----------:|:-----------------:| +| `postgresql` | ✓ | | | | +| `redis` | | ✓ | | | +| `candlepin` | ✓ (internal DB) | | ✓ | | +| `foreman` | ✓ | ✓ | ✓ | ✓ | +| `dynflow-sidekiq@*` | ✓ | ✓ | ✓ | | +| `foreman-recurring@*` | ✓ | ✓ | ✓ | | +| `foreman-proxy` | | | | ✓ | +| `pulp-api` | ✓ | ✓ | ✓ | | +| `pulp-content` | ✓ | ✓ | ✓ | | +| `pulp-worker@*` | ✓ | ✓ | ✓ | |