From b3fd6eddc9c5497504b09cd1cb5aa9bc8c2b9842 Mon Sep 17 00:00:00 2001 From: Christian Chwala Date: Thu, 30 Apr 2026 08:23:23 +0200 Subject: [PATCH 1/6] feat(grafana): auth proxy + per-user PostgreSQL datasources (PR8) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - grafana/provisioning/datasources/postgres.yml: replace single myuser datasource with per-user datasources (demo_openmrg, demo_orange_cameroun) each connecting as the matching PG login role, plus an admin datasource connecting as webserver_role for cross-tenant operator dashboards. - docker-compose.yml: enable Grafana auth proxy mode (GF_AUTH_PROXY_ENABLED), trust X-WEBAUTH-USER header from the webserver container only (GF_AUTH_PROXY_WHITELIST=webserver), disable anonymous access and the Grafana login form. - webserver/main.py (grafana_proxy): strip any X-WEBAUTH-USER header supplied by the browser (prevents identity forgery), then inject current_user.id so Grafana maps the request to the correct Grafana user. 
Data isolation chain: Flask session → X-WEBAUTH-USER → Grafana user → per-user datasource → PG role → RLS / security-barrier views --- docker-compose.yml | 15 +++++-- grafana/provisioning/datasources/postgres.yml | 43 ++++++++++++++++--- webserver/main.py | 14 +++++- 3 files changed, 61 insertions(+), 11 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 9b829e7..e82da70 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -239,14 +239,23 @@ services: - GF_SECURITY_ADMIN_PASSWORD=admin - GF_INSTALL_PLUGINS=grafana-piechart-panel - GF_USERS_ALLOW_SIGN_UP=false - - GF_AUTH_ANONYMOUS_ENABLED=true - - GF_AUTH_ANONYMOUS_ORG_ROLE=Viewer - GF_SECURITY_ALLOW_EMBEDDING=true - GF_SERVER_ROOT_URL=http://localhost:5000/grafana/ - GF_SERVER_SERVE_FROM_SUB_PATH=true - - GF_AUTH_DISABLE_LOGIN_FORM=true - GF_SECURITY_COOKIE_SAMESITE=none - GF_SECURITY_COOKIE_SECURE=false + # Auth proxy — Grafana trusts the X-WEBAUTH-USER header injected by the + # Flask proxy (webserver service). Anonymous access and the login form are + # disabled so Grafana itself is not reachable without going through the proxy. + - GF_AUTH_PROXY_ENABLED=true + - GF_AUTH_PROXY_HEADER_NAME=X-WEBAUTH-USER + - GF_AUTH_PROXY_HEADER_PROPERTY=username + - GF_AUTH_PROXY_AUTO_SIGN_UP=true + # Restrict trusted header to requests coming from the webserver container. + # In Docker Compose the value is the service hostname; Grafana resolves it. 
+ - GF_AUTH_PROXY_WHITELIST=webserver + - GF_AUTH_DISABLE_LOGIN_FORM=true + - GF_AUTH_ANONYMOUS_ENABLED=false volumes: - grafana_data:/var/lib/grafana - ./grafana/provisioning:/etc/grafana/provisioning diff --git a/grafana/provisioning/datasources/postgres.yml b/grafana/provisioning/datasources/postgres.yml index 6c78f22..1c2c043 100644 --- a/grafana/provisioning/datasources/postgres.yml +++ b/grafana/provisioning/datasources/postgres.yml @@ -1,17 +1,48 @@ apiVersion: 1 datasources: - - name: PostgreSQL - uid: PostgreSQL + # Per-user datasources — each connects as the matching PostgreSQL login role. + # RLS on cml_metadata/cml_stats and security-barrier views on cml_data ensure + # that queries only return rows owned by that role (user_id = current_user). + - name: demo_openmrg + uid: ds_demo_openmrg type: grafana-postgresql-datasource access: proxy url: database:5432 database: mydatabase - user: myuser + user: demo_openmrg secureJsonData: - password: mypassword + password: demo_openmrg_password jsonData: sslmode: disable - defaultDatabase: mydatabase - isDefault: true + editable: true + + - name: demo_orange_cameroun + uid: ds_demo_orange_cameroun + type: grafana-postgresql-datasource + access: proxy + url: database:5432 + database: mydatabase + user: demo_orange_cameroun + secureJsonData: + password: demo_orange_cameroun_password + jsonData: + sslmode: disable + editable: true + + # Admin datasource — connects as webserver_role. + # No RLS bypass: webserver_role sees all tenants' data and is intended for + # cross-tenant / operator dashboards only. Not assigned to individual users. 
+ - name: admin + uid: ds_admin + type: grafana-postgresql-datasource + access: proxy + url: database:5432 + database: mydatabase + user: webserver_role + secureJsonData: + password: webserverpassword + jsonData: + sslmode: disable + isDefault: false editable: true diff --git a/webserver/main.py b/webserver/main.py index 64e3a48..2fa9488 100644 --- a/webserver/main.py +++ b/webserver/main.py @@ -419,10 +419,20 @@ def grafana_root_redirect(): ) @login_required def grafana_proxy(path): - """Proxy all requests to Grafana container.""" + """Proxy all requests to Grafana container. + + Injects X-WEBAUTH-USER so Grafana's auth proxy mode maps the request to + the correct Grafana user. Any X-WEBAUTH-USER header sent by the browser + is stripped first to prevent identity forgery. + """ grafana_url = f"http://grafana:3000/grafana/{path}" method = request.method - headers = {key: value for key, value in request.headers if key.lower() != "host"} + headers = { + key: value + for key, value in request.headers + if key.lower() not in ("host", "x-webauth-user") + } + headers["X-WEBAUTH-USER"] = current_user.id data = request.get_data() params = request.args From 79dc2ddd648996d7cdd3581268078102894fac91 Mon Sep 17 00:00:00 2001 From: Christian Chwala Date: Thu, 30 Apr 2026 08:45:42 +0200 Subject: [PATCH 2/6] test(grafana): grafana_proxy header injection tests --- webserver/tests/test_grafana_proxy.py | 89 +++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 webserver/tests/test_grafana_proxy.py diff --git a/webserver/tests/test_grafana_proxy.py b/webserver/tests/test_grafana_proxy.py new file mode 100644 index 0000000..41afce8 --- /dev/null +++ b/webserver/tests/test_grafana_proxy.py @@ -0,0 +1,89 @@ +import os +import sys +from unittest.mock import Mock + +import pytest +from werkzeug.security import generate_password_hash + +sys.modules.setdefault("folium", Mock()) +sys.modules.setdefault("requests", Mock()) + +sys.path.insert(0, 
os.path.dirname(os.path.dirname(__file__))) +import main as wm # noqa: E402 + + +def _make_grafana_response(status=200, content=b"ok", headers=None): + """Build a minimal requests.Response-like mock.""" + resp = Mock() + resp.status_code = status + resp.content = content + resp.headers = headers or {"Content-Type": "text/plain"} + return resp + + +@pytest.fixture +def logged_in_client(monkeypatch): + """Test client with demo_openmrg actually logged in via the login route. + + Uses a real session so flask_login's current_user proxy resolves correctly + inside the grafana_proxy route handler. + """ + monkeypatch.setitem( + wm.USERS, + "demo_openmrg", + { + "password_hash": generate_password_hash("testpass"), + "display_name": "OpenMRG", + }, + ) + wm.app.config["TESTING"] = True + client = wm.app.test_client() + client.post("/login", data={"username": "demo_openmrg", "password": "testpass"}) + return client + + +def test_grafana_proxy_injects_webauth_user_header(logged_in_client, monkeypatch): + """X-WEBAUTH-USER must be set to the logged-in user's id.""" + mock_requests = Mock() + mock_requests.request.return_value = _make_grafana_response() + monkeypatch.setattr(wm, "requests", mock_requests) + + logged_in_client.get("/grafana/") + + _, kwargs = mock_requests.request.call_args + assert kwargs["headers"]["X-WEBAUTH-USER"] == "demo_openmrg" + + +def test_grafana_proxy_strips_client_webauth_user_header(logged_in_client, monkeypatch): + """A browser-supplied X-WEBAUTH-USER must be removed before forwarding.""" + mock_requests = Mock() + mock_requests.request.return_value = _make_grafana_response() + monkeypatch.setattr(wm, "requests", mock_requests) + + logged_in_client.get("/grafana/", headers={"X-WEBAUTH-USER": "attacker"}) + + _, kwargs = mock_requests.request.call_args + # The injected value must be the server-controlled user id, not the + # attacker-supplied value. The case-insensitive strip must have fired. 
+ assert kwargs["headers"]["X-WEBAUTH-USER"] == "demo_openmrg" + + +def test_grafana_proxy_unauthenticated_redirects_to_login(): + """Unauthenticated requests must not reach Grafana at all.""" + wm.app.config["TESTING"] = True + client = wm.app.test_client() + resp = client.get("/grafana/") + assert resp.status_code == 302 + assert "login" in resp.headers["Location"] + + +def test_grafana_proxy_forwards_path(logged_in_client, monkeypatch): + """The full subpath must be forwarded to the Grafana container URL.""" + mock_requests = Mock() + mock_requests.request.return_value = _make_grafana_response() + monkeypatch.setattr(wm, "requests", mock_requests) + + logged_in_client.get("/grafana/d/abc123/my-dashboard") + + args, _ = mock_requests.request.call_args + assert args[1] == "http://grafana:3000/grafana/d/abc123/my-dashboard" From 5de1dd7c4bc1c20590f63c97bcbe83cb07c0cbe9 Mon Sep 17 00:00:00 2001 From: Christian Chwala Date: Thu, 30 Apr 2026 09:13:52 +0200 Subject: [PATCH 3/6] fix: remove invalid auth proxy whitelist hostname GF_AUTH_PROXY_WHITELIST requires IP/CIDR notation; passing the service hostname 'webserver' caused Grafana to log: 'could not parse the network: invalid CIDR address: webserver/32' and silently skip auth proxy initialization entirely. All iframe requests therefore arrived as anonymous (userId=0) and were redirected to the Grafana welcome page instead of the dashboard. Fix: remove the whitelist entry and the host port-3000 mapping. Port 3000 is now only reachable from within the Docker Compose network, providing equivalent isolation without the broken setting. 
--- docker-compose.yml | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index e82da70..3fb4a20 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -231,8 +231,6 @@ services: grafana: image: grafana/grafana:latest - ports: - - "3000:3000" depends_on: - database environment: @@ -251,9 +249,8 @@ services: - GF_AUTH_PROXY_HEADER_NAME=X-WEBAUTH-USER - GF_AUTH_PROXY_HEADER_PROPERTY=username - GF_AUTH_PROXY_AUTO_SIGN_UP=true - # Restrict trusted header to requests coming from the webserver container. - # In Docker Compose the value is the service hostname; Grafana resolves it. - - GF_AUTH_PROXY_WHITELIST=webserver + # No whitelist needed: port 3000 is not exposed to the host, so only + # containers in the same Docker Compose network can reach Grafana. - GF_AUTH_DISABLE_LOGIN_FORM=true - GF_AUTH_ANONYMOUS_ENABLED=false volumes: From 2f6aa1dd971d6a728db4e902e8510cbe380bc09d Mon Sep 17 00:00:00 2001 From: Christian Chwala Date: Thu, 30 Apr 2026 10:46:33 +0200 Subject: [PATCH 4/6] feat: multi-tenant Grafana setup with per-org data isolation - docker-compose.yml: fix Grafana healthcheck regex; add init_grafana service that bootstraps org 2 via API on startup - grafana/init_grafana.py: new bootstrap script - creates org 2, datasource, copies dashboards from org 1, creates tenant users - grafana/Dockerfile: image for init_grafana service - grafana/provisioning/dashboards/dashboards.yml: remove org 2 entry (was crashing Grafana at startup with org.notFound); org 2 handled by init_grafana - grafana/provisioning/datasources/postgres.yml: remove org 2 entry (same reason); simplify org 1 config - grafana/provisioning/dashboards/definitions/*.json: replace cml_data_1h aggregate with cml_data_1h_secure security-barrier view so tenant DB roles can query without permission errors - webserver/configs/users.json: add grafana_org_id field per user - webserver/main.py: read grafana_org_id from user config; pass to tem- 
plates so the Grafana iframe targets the correct org; handle existing-user migration --- docker-compose.yml | 16 ++ grafana/Dockerfile | 4 + grafana/init_grafana.py | 271 ++++++++++++++++++ .../provisioning/dashboards/dashboards.yml | 5 +- .../dashboards/definitions/cml-archive.json | 27 +- .../dashboards/definitions/cml-metadata.json | 16 +- .../dashboards/definitions/cml-raw-data.json | 16 +- .../dashboards/definitions/cml-realtime.json | 58 ++-- .../dashboards/definitions/data-quality.json | 29 +- .../dashboards/definitions/test.json | 18 +- grafana/provisioning/datasources/postgres.yml | 53 ++-- scripts/update_dashboards_datasource.py | 60 ++++ webserver/configs/users.json | 6 +- webserver/main.py | 6 +- webserver/templates/archive.html | 2 +- webserver/templates/realtime.html | 2 +- 16 files changed, 506 insertions(+), 83 deletions(-) create mode 100644 grafana/Dockerfile create mode 100644 grafana/init_grafana.py create mode 100644 scripts/update_dashboards_datasource.py diff --git a/docker-compose.yml b/docker-compose.yml index 3fb4a20..de7cfab 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -256,6 +256,22 @@ services: volumes: - grafana_data:/var/lib/grafana - ./grafana/provisioning:/etc/grafana/provisioning + healthcheck: + test: ["CMD-SHELL", "wget -qO- http://localhost:3000/api/health | grep -q 'database'"] + interval: 5s + timeout: 5s + retries: 20 + + # One-shot bootstrap: creates org 2, pre-creates Grafana users in their correct + # org, then exits. Runs every time the stack starts (idempotent). 
+ init_grafana: + build: + context: ./grafana + dockerfile: Dockerfile + depends_on: + grafana: + condition: service_healthy + restart: "no" # Integration test runner (run with: docker compose run --rm integration_tests) integration_tests: diff --git a/grafana/Dockerfile b/grafana/Dockerfile new file mode 100644 index 0000000..fce0fd8 --- /dev/null +++ b/grafana/Dockerfile @@ -0,0 +1,4 @@ +FROM python:3.11-slim +RUN pip install --no-cache-dir requests +COPY init_grafana.py /init_grafana.py +CMD ["python", "/init_grafana.py"] diff --git a/grafana/init_grafana.py b/grafana/init_grafana.py new file mode 100644 index 0000000..46f1d4d --- /dev/null +++ b/grafana/init_grafana.py @@ -0,0 +1,271 @@ +""" +Grafana bootstrap script — run once at stack startup. + +Creates the per-tenant Grafana Organisations and pre-creates Grafana users so +each user is automatically placed in their correct org with Viewer role. + +Layout: + Org 1 (default, id=1) — demo_openmrg + Org 2 — demo_orange_cameroun + +Org 1 datasource is provisioned from grafana/provisioning/datasources/postgres.yml. +Org 2 datasource and dashboards are created via the Grafana API by this script. 
+""" + +import sys +import time +import requests + +GRAFANA_URL = "http://grafana:3000" +ADMIN_AUTH = ("admin", "admin") + +ORGS = [ + {"id": 1, "name": "demo_openmrg"}, + {"id": 2, "name": "demo_orange_cameroun"}, +] + +USERS = [ + {"login": "demo_openmrg", "org_id": 1, "role": "Viewer"}, + {"login": "demo_orange_cameroun", "org_id": 2, "role": "Viewer"}, +] + + +def wait_for_grafana(timeout=120): + deadline = time.time() + timeout + while time.time() < deadline: + try: + r = requests.get(GRAFANA_URL + "/api/health", timeout=5) + if r.status_code == 200 and r.json().get("database") == "ok": + print("Grafana is up.") + return + except Exception: + pass + print("Waiting for Grafana...") + time.sleep(3) + print("ERROR: Grafana did not become healthy in time.", file=sys.stderr) + sys.exit(1) + + +def get_or_create_org(org_id, org_name): + """Ensure an org with the given numeric id and name exists.""" + r = requests.get(f"{GRAFANA_URL}/api/orgs/{org_id}", auth=ADMIN_AUTH) + if r.status_code == 200: + print(f"Org {org_id} ({org_name}) already exists.") + return + # Org id=1 always exists; for id>=2 we create by name. + r = requests.post( + f"{GRAFANA_URL}/api/orgs", + json={"name": org_name}, + auth=ADMIN_AUTH, + ) + if r.status_code not in (200, 409): + print( + f"ERROR creating org {org_name}: {r.status_code} {r.text}", file=sys.stderr + ) + sys.exit(1) + print(f"Created org {org_name}.") + + +def rename_default_org(): + """Rename org 1 from 'Main Org.' to 'demo_openmrg'.""" + r = requests.put( + f"{GRAFANA_URL}/api/orgs/1", + json={"name": "demo_openmrg"}, + auth=ADMIN_AUTH, + ) + if r.status_code not in (200,): + print(f"WARN: could not rename org 1: {r.status_code} {r.text}") + else: + print("Renamed org 1 to demo_openmrg.") + + +def get_or_create_user(login, org_id, role): + """ + Pre-create the Grafana user (so we can control org membership before the + user ever logs in via the auth proxy). 
+ """ + # Look up by login + r = requests.get( + f"{GRAFANA_URL}/api/users/lookup?loginOrEmail={login}", auth=ADMIN_AUTH + ) + if r.status_code == 200: + user_id = r.json()["id"] + print(f"User {login} already exists (id={user_id}).") + elif r.status_code == 404: + # Create the user — password is irrelevant (auth proxy is used at runtime) + r2 = requests.post( + f"{GRAFANA_URL}/api/admin/users", + json={ + "login": login, + "name": login, + "password": "change-me-proxy-only", + "OrgId": org_id, + }, + auth=ADMIN_AUTH, + ) + if r2.status_code not in (200,): + print( + f"ERROR creating user {login}: {r2.status_code} {r2.text}", + file=sys.stderr, + ) + sys.exit(1) + user_id = r2.json()["id"] + print(f"Created user {login} (id={user_id}).") + else: + print( + f"ERROR looking up user {login}: {r.status_code} {r.text}", file=sys.stderr + ) + sys.exit(1) + + assign_user_to_org(user_id, login, org_id, role) + + +def assign_user_to_org(user_id, login, org_id, role): + """Remove user from all other orgs, then add/confirm in target org.""" + # Get current orgs for this user + r = requests.get(f"{GRAFANA_URL}/api/users/{user_id}/orgs", auth=ADMIN_AUTH) + current_orgs = {o["orgId"] for o in r.json()} if r.status_code == 200 else set() + + # Ensure user is in the target org with the correct role + if org_id not in current_orgs: + r2 = requests.post( + f"{GRAFANA_URL}/api/orgs/{org_id}/users", + json={"loginOrEmail": login, "role": role}, + auth=ADMIN_AUTH, + ) + if r2.status_code not in (200,): + print( + f"WARN: could not add {login} to org {org_id}: {r2.status_code} {r2.text}" + ) + else: + # Patch role to be sure + requests.patch( + f"{GRAFANA_URL}/api/orgs/{org_id}/users/{user_id}", + json={"role": role}, + auth=ADMIN_AUTH, + ) + print(f"User {login} already in org {org_id}.") + + # Remove from all orgs that are not the target + for other_org_id in current_orgs - {org_id}: + r3 = requests.delete( + f"{GRAFANA_URL}/api/orgs/{other_org_id}/users/{user_id}", + auth=ADMIN_AUTH, 
+ ) + print(f"Removed {login} from org {other_org_id}: {r3.status_code}") + + # Set this org as the user's current/default org + requests.post( + f"{GRAFANA_URL}/api/users/{user_id}/using/{org_id}", + auth=ADMIN_AUTH, + ) + print(f"Set org {org_id} as default for {login}.") + + +def trigger_provisioning_reload(): + """Ask Grafana to re-read its provisioning files (datasources + dashboards).""" + for resource in ("datasources", "dashboards"): + r = requests.post( + f"{GRAFANA_URL}/api/admin/provisioning/{resource}/reload", + auth=ADMIN_AUTH, + ) + print(f"Reload {resource}: {r.status_code}") + + +def create_datasource_for_org(org_id, name, uid, user, password): + """Create a PostgreSQL datasource for the given org via API.""" + r = requests.post( + f"{GRAFANA_URL}/api/datasources", + json={ + "name": name, + "uid": uid, + "type": "grafana-postgresql-datasource", + "access": "proxy", + "url": "database:5432", + "database": "mydatabase", + "user": user, + "secureJsonData": {"password": password}, + "jsonData": {"sslmode": "disable"}, + "isDefault": True, + "editable": False, + }, + auth=ADMIN_AUTH, + headers={"X-Grafana-Org-Id": str(org_id)}, + ) + if r.status_code in (200,): + print(f"Created datasource '{name}' for org {org_id}.") + elif r.status_code == 409: + print(f"Datasource '{name}' already exists in org {org_id}.") + else: + print( + f"WARN: could not create datasource for org {org_id}: {r.status_code} {r.text}" + ) + + +def copy_dashboards_to_org(target_org_id, source_org_id=1): + """Copy all dashboards from source_org_id into target_org_id via API.""" + r = requests.get( + f"{GRAFANA_URL}/api/search?type=dash-db", + auth=ADMIN_AUTH, + headers={"X-Grafana-Org-Id": str(source_org_id)}, + ) + if r.status_code != 200: + print( + f"WARN: could not list dashboards in org {source_org_id}: {r.status_code}" + ) + return + dashboards = r.json() + print( + f"Copying {len(dashboards)} dashboards from org {source_org_id} to org {target_org_id}..." 
+ ) + for db in dashboards: + uid = db.get("uid") + if not uid: + continue + r2 = requests.get( + f"{GRAFANA_URL}/api/dashboards/uid/{uid}", + auth=ADMIN_AUTH, + headers={"X-Grafana-Org-Id": str(source_org_id)}, + ) + if r2.status_code != 200: + print(f"WARN: could not fetch dashboard {uid}: {r2.status_code}") + continue + dashboard_json = r2.json()["dashboard"] + dashboard_json.pop("id", None) # remove source org's internal id + r3 = requests.post( + f"{GRAFANA_URL}/api/dashboards/db", + json={"dashboard": dashboard_json, "overwrite": True, "folderId": 0}, + auth=ADMIN_AUTH, + headers={"X-Grafana-Org-Id": str(target_org_id)}, + ) + if r3.status_code != 200: + print( + f"WARN: could not import dashboard {uid} to org {target_org_id}: {r3.status_code} {r3.text}" + ) + else: + print(f"Copied dashboard {uid} to org {target_org_id}.") + + +if __name__ == "__main__": + wait_for_grafana() + rename_default_org() + for org in ORGS[1:]: # org 1 always exists; create 2+ + get_or_create_org(org["id"], org["name"]) + # Create datasource for org 2 via API (cannot use provisioning file as org 2 + # does not exist when Grafana starts) + create_datasource_for_org( + org_id=2, + name="PostgreSQL", + uid="ds_demo_orange_cameroun", + user="demo_orange_cameroun", + password="demo_orange_cameroun_password", + ) + # Reload provisioning (dashboards for org 1) + time.sleep(2) + trigger_provisioning_reload() + time.sleep(2) + # Copy dashboards from org 1 into org 2 + copy_dashboards_to_org(target_org_id=2, source_org_id=1) + for user in USERS: + get_or_create_user(user["login"], user["org_id"], user["role"]) + print("Grafana bootstrap complete.") diff --git a/grafana/provisioning/dashboards/dashboards.yml b/grafana/provisioning/dashboards/dashboards.yml index 0383e26..f21d8f8 100644 --- a/grafana/provisioning/dashboards/dashboards.yml +++ b/grafana/provisioning/dashboards/dashboards.yml @@ -1,7 +1,7 @@ apiVersion: 1 providers: - - name: 'CML Dashboards' + - name: 'CML Dashboards (org 1)' 
orgId: 1 folder: '' type: file @@ -11,3 +11,6 @@ providers: allowUiUpdates: false options: path: /etc/grafana/provisioning/dashboards/definitions + + # Dashboards for org 2 are copied via the Grafana API by the init_grafana + # service after org 2 is created (see grafana/init_grafana.py). diff --git a/grafana/provisioning/dashboards/definitions/cml-archive.json b/grafana/provisioning/dashboards/definitions/cml-archive.json index b76ad5c..f2543f6 100644 --- a/grafana/provisioning/dashboards/definitions/cml-archive.json +++ b/grafana/provisioning/dashboards/definitions/cml-archive.json @@ -18,7 +18,7 @@ "type": "timeseries", "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "gridPos": { "h": 9, @@ -30,11 +30,11 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "format": "time_series", "rawQuery": true, - "rawSql": "SELECT\n bucket AS \"time\",\n 'sublinks' AS metric,\n COUNT(*) AS value\nFROM cml_data_1h\nWHERE bucket >= $__timeFrom()::timestamptz\n AND bucket <= $__timeTo()::timestamptz\nGROUP BY bucket\nORDER BY 1 ASC", + "rawSql": "SELECT\n bucket AS \"time\",\n 'sublinks' AS metric,\n COUNT(*) AS value\nFROM cml_data_1h_secure\nWHERE bucket >= $__timeFrom()::timestamptz\n AND bucket <= $__timeTo()::timestamptz\nGROUP BY bucket\nORDER BY 1 ASC", "refId": "A" } ], @@ -105,7 +105,7 @@ "type": "timeseries", "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "gridPos": { "h": 9, @@ -117,11 +117,11 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "format": "time_series", "rawQuery": true, - "rawSql": "SELECT\n bucket AS \"time\",\n 'data points' AS metric,\n COUNT(*) * 360 AS value\nFROM cml_data_1h\nWHERE bucket >= $__timeFrom()::timestamptz\n AND bucket <= $__timeTo()::timestamptz\nGROUP BY bucket\nORDER BY 1 ASC", + "rawSql": 
"SELECT\n bucket AS \"time\",\n 'data points' AS metric,\n COUNT(*) * 360 AS value\nFROM cml_data_1h_secure\nWHERE bucket >= $__timeFrom()::timestamptz\n AND bucket <= $__timeTo()::timestamptz\nGROUP BY bucket\nORDER BY 1 ASC", "refId": "A" } ], @@ -188,7 +188,20 @@ } ], "templating": { - "list": [] + "list": [ + { + "current": {}, + "hide": 2, + "includeAll": false, + "multi": false, + "name": "datasource", + "options": [], + "query": "grafana-postgresql-datasource", + "refresh": 1, + "type": "datasource", + "label": "Datasource" + } + ] }, "annotations": { "list": [] diff --git a/grafana/provisioning/dashboards/definitions/cml-metadata.json b/grafana/provisioning/dashboards/definitions/cml-metadata.json index c4e3916..34fa5de 100644 --- a/grafana/provisioning/dashboards/definitions/cml-metadata.json +++ b/grafana/provisioning/dashboards/definitions/cml-metadata.json @@ -25,7 +25,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -82,7 +82,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "editorMode": "code", "format": "table", @@ -120,6 +120,18 @@ ], "templating": { "list": [ + { + "current": {}, + "hide": 2, + "includeAll": false, + "multi": false, + "name": "datasource", + "options": [], + "query": "grafana-postgresql-datasource", + "refresh": 1, + "type": "datasource", + "label": "Datasource" + }, { "current": { "selected": false, diff --git a/grafana/provisioning/dashboards/definitions/cml-raw-data.json b/grafana/provisioning/dashboards/definitions/cml-raw-data.json index 1a258eb..89e7091 100644 --- a/grafana/provisioning/dashboards/definitions/cml-raw-data.json +++ b/grafana/provisioning/dashboards/definitions/cml-raw-data.json @@ -25,7 +25,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -109,7 +109,7 
@@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "editorMode": "code", "format": "table", @@ -147,6 +147,18 @@ ], "templating": { "list": [ + { + "current": {}, + "hide": 2, + "includeAll": false, + "multi": false, + "name": "datasource", + "options": [], + "query": "grafana-postgresql-datasource", + "refresh": 1, + "type": "datasource", + "label": "Datasource" + }, { "current": { "selected": false, diff --git a/grafana/provisioning/dashboards/definitions/cml-realtime.json b/grafana/provisioning/dashboards/definitions/cml-realtime.json index dff6b04..2abacc0 100644 --- a/grafana/provisioning/dashboards/definitions/cml-realtime.json +++ b/grafana/provisioning/dashboards/definitions/cml-realtime.json @@ -25,7 +25,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -56,7 +56,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "gridPos": { "h": 1, @@ -78,7 +78,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -369,37 +369,37 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "format": "time_series", "rawQuery": true, - "rawSql": "SELECT\n bucket AS \"time\",\n sublink_id || ' min' AS metric,\n rsl_min AS value\nFROM cml_data_1h\nWHERE cml_id = '${cml_id}'\n AND '${interval}' = 'auto'\n AND EXTRACT(EPOCH FROM ($__timeTo()::timestamptz - $__timeFrom()::timestamptz)) > 259200\n AND bucket >= $__timeFrom()::timestamptz\n AND bucket <= $__timeTo()::timestamptz\nORDER BY 1 ASC", + "rawSql": "SELECT\n bucket AS \"time\",\n sublink_id || ' min' AS metric,\n rsl_min AS value\nFROM cml_data_1h_secure\nWHERE cml_id = '${cml_id}'\n AND '${interval}' = 'auto'\n AND EXTRACT(EPOCH FROM ($__timeTo()::timestamptz 
- $__timeFrom()::timestamptz)) > 259200\n AND bucket >= $__timeFrom()::timestamptz\n AND bucket <= $__timeTo()::timestamptz\nORDER BY 1 ASC", "refId": "A" }, { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "format": "time_series", "rawQuery": true, - "rawSql": "SELECT\n bucket AS \"time\",\n sublink_id || ' max' AS metric,\n rsl_max AS value\nFROM cml_data_1h\nWHERE cml_id = '${cml_id}'\n AND '${interval}' = 'auto'\n AND EXTRACT(EPOCH FROM ($__timeTo()::timestamptz - $__timeFrom()::timestamptz)) > 259200\n AND bucket >= $__timeFrom()::timestamptz\n AND bucket <= $__timeTo()::timestamptz\nORDER BY 1 ASC", + "rawSql": "SELECT\n bucket AS \"time\",\n sublink_id || ' max' AS metric,\n rsl_max AS value\nFROM cml_data_1h_secure\nWHERE cml_id = '${cml_id}'\n AND '${interval}' = 'auto'\n AND EXTRACT(EPOCH FROM ($__timeTo()::timestamptz - $__timeFrom()::timestamptz)) > 259200\n AND bucket >= $__timeFrom()::timestamptz\n AND bucket <= $__timeTo()::timestamptz\nORDER BY 1 ASC", "refId": "B" }, { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "format": "time_series", "rawQuery": true, - "rawSql": "SELECT\n bucket AS \"time\",\n sublink_id || ' avg' AS metric,\n rsl_avg AS value\nFROM cml_data_1h\nWHERE cml_id = '${cml_id}'\n AND '${interval}' = 'auto'\n AND EXTRACT(EPOCH FROM ($__timeTo()::timestamptz - $__timeFrom()::timestamptz)) > 259200\n AND bucket >= $__timeFrom()::timestamptz\n AND bucket <= $__timeTo()::timestamptz\nORDER BY 1 ASC", + "rawSql": "SELECT\n bucket AS \"time\",\n sublink_id || ' avg' AS metric,\n rsl_avg AS value\nFROM cml_data_1h_secure\nWHERE cml_id = '${cml_id}'\n AND '${interval}' = 'auto'\n AND EXTRACT(EPOCH FROM ($__timeTo()::timestamptz - $__timeFrom()::timestamptz)) > 259200\n AND bucket >= $__timeFrom()::timestamptz\n AND bucket <= $__timeTo()::timestamptz\nORDER BY 1 ASC", "refId": "C" }, { "datasource": { "type": 
"grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "format": "time_series", "rawQuery": true, @@ -409,7 +409,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "format": "time_series", "rawQuery": true, @@ -423,7 +423,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -714,37 +714,37 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "format": "time_series", "rawQuery": true, - "rawSql": "SELECT\n bucket AS \"time\",\n sublink_id || ' min' AS metric,\n tsl_min AS value\nFROM cml_data_1h\nWHERE cml_id = '${cml_id}'\n AND '${interval}' = 'auto'\n AND EXTRACT(EPOCH FROM ($__timeTo()::timestamptz - $__timeFrom()::timestamptz)) > 259200\n AND bucket >= $__timeFrom()::timestamptz\n AND bucket <= $__timeTo()::timestamptz\nORDER BY 1 ASC", + "rawSql": "SELECT\n bucket AS \"time\",\n sublink_id || ' min' AS metric,\n tsl_min AS value\nFROM cml_data_1h_secure\nWHERE cml_id = '${cml_id}'\n AND '${interval}' = 'auto'\n AND EXTRACT(EPOCH FROM ($__timeTo()::timestamptz - $__timeFrom()::timestamptz)) > 259200\n AND bucket >= $__timeFrom()::timestamptz\n AND bucket <= $__timeTo()::timestamptz\nORDER BY 1 ASC", "refId": "A" }, { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "format": "time_series", "rawQuery": true, - "rawSql": "SELECT\n bucket AS \"time\",\n sublink_id || ' max' AS metric,\n tsl_max AS value\nFROM cml_data_1h\nWHERE cml_id = '${cml_id}'\n AND '${interval}' = 'auto'\n AND EXTRACT(EPOCH FROM ($__timeTo()::timestamptz - $__timeFrom()::timestamptz)) > 259200\n AND bucket >= $__timeFrom()::timestamptz\n AND bucket <= $__timeTo()::timestamptz\nORDER BY 1 ASC", + "rawSql": "SELECT\n bucket AS \"time\",\n sublink_id || ' max' AS metric,\n tsl_max AS value\nFROM 
cml_data_1h_secure\nWHERE cml_id = '${cml_id}'\n AND '${interval}' = 'auto'\n AND EXTRACT(EPOCH FROM ($__timeTo()::timestamptz - $__timeFrom()::timestamptz)) > 259200\n AND bucket >= $__timeFrom()::timestamptz\n AND bucket <= $__timeTo()::timestamptz\nORDER BY 1 ASC", "refId": "B" }, { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "format": "time_series", "rawQuery": true, - "rawSql": "SELECT\n bucket AS \"time\",\n sublink_id || ' avg' AS metric,\n tsl_avg AS value\nFROM cml_data_1h\nWHERE cml_id = '${cml_id}'\n AND '${interval}' = 'auto'\n AND EXTRACT(EPOCH FROM ($__timeTo()::timestamptz - $__timeFrom()::timestamptz)) > 259200\n AND bucket >= $__timeFrom()::timestamptz\n AND bucket <= $__timeTo()::timestamptz\nORDER BY 1 ASC", + "rawSql": "SELECT\n bucket AS \"time\",\n sublink_id || ' avg' AS metric,\n tsl_avg AS value\nFROM cml_data_1h_secure\nWHERE cml_id = '${cml_id}'\n AND '${interval}' = 'auto'\n AND EXTRACT(EPOCH FROM ($__timeTo()::timestamptz - $__timeFrom()::timestamptz)) > 259200\n AND bucket >= $__timeFrom()::timestamptz\n AND bucket <= $__timeTo()::timestamptz\nORDER BY 1 ASC", "refId": "C" }, { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "format": "time_series", "rawQuery": true, @@ -754,7 +754,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "format": "time_series", "rawQuery": true, @@ -768,7 +768,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -862,7 +862,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "format": "table", "rawQuery": true, @@ -884,6 +884,18 @@ ], "templating": { "list": [ + { + "current": {}, + "hide": 2, + "includeAll": false, + "multi": false, + "name": "datasource", + "options": 
[], + "query": "grafana-postgresql-datasource", + "refresh": 1, + "type": "datasource", + "label": "Datasource" + }, { "current": { "selected": false, @@ -892,7 +904,7 @@ }, "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "definition": "SELECT DISTINCT cml_id::text FROM cml_metadata ORDER BY cml_id", "description": null, diff --git a/grafana/provisioning/dashboards/definitions/data-quality.json b/grafana/provisioning/dashboards/definitions/data-quality.json index 5f4dc2f..818b062 100644 --- a/grafana/provisioning/dashboards/definitions/data-quality.json +++ b/grafana/provisioning/dashboards/definitions/data-quality.json @@ -25,7 +25,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -56,7 +56,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -163,7 +163,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "format": "time_series", "rawQuery": true, @@ -177,7 +177,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -230,7 +230,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "format": "table", "rawQuery": true, @@ -244,7 +244,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -323,7 +323,7 @@ { "datasource": { "type": "grafana-postgresql-datasource", - "uid": "PostgreSQL" + "uid": "${datasource}" }, "format": "table", "rawQuery": true, @@ -343,7 +343,20 @@ "statistics" ], "templating": { - "list": [] + "list": [ + { + "current": {}, + "hide": 2, + "includeAll": false, + "multi": false, + "name": 
"datasource", + "options": [], + "query": "grafana-postgresql-datasource", + "refresh": 1, + "type": "datasource", + "label": "Datasource" + } + ] }, "time": { "from": "2015-08-27T00:00:00Z", diff --git a/grafana/provisioning/dashboards/definitions/test.json b/grafana/provisioning/dashboards/definitions/test.json index 4178119..0c17b75 100644 --- a/grafana/provisioning/dashboards/definitions/test.json +++ b/grafana/provisioning/dashboards/definitions/test.json @@ -10,5 +10,21 @@ "from": "now-6h", "to": "now" }, - "panels": [] + "panels": [], + "templating": { + "list": [ + { + "current": {}, + "hide": 2, + "includeAll": false, + "multi": false, + "name": "datasource", + "options": [], + "query": "grafana-postgresql-datasource", + "refresh": 1, + "type": "datasource", + "label": "Datasource" + } + ] + } } \ No newline at end of file diff --git a/grafana/provisioning/datasources/postgres.yml b/grafana/provisioning/datasources/postgres.yml index 1c2c043..1267375 100644 --- a/grafana/provisioning/datasources/postgres.yml +++ b/grafana/provisioning/datasources/postgres.yml @@ -1,13 +1,24 @@ apiVersion: 1 datasources: - # Per-user datasources — each connects as the matching PostgreSQL login role. - # RLS on cml_metadata/cml_stats and security-barrier views on cml_data ensure - # that queries only return rows owned by that role (user_id = current_user). - - name: demo_openmrg + # Each Grafana organization has exactly ONE PostgreSQL datasource connecting as + # the matching PG login role. RLS on cml_metadata/cml_stats and security-barrier + # views (cml_data_secure, cml_data_1h_secure) scope all queries to that role's + # data automatically. + # + # Dashboards use a ${datasource} template variable of type "datasource" filtered + # to grafana-postgresql-datasource. Because each org has only one such datasource, + # the variable auto-selects the correct one — no user interaction required. + # + # Org 1 (demo_openmrg) datasource is provisioned from this file. 
+ # Org 2 (demo_orange_cameroun) datasource is created via API by init_grafana. + + # Org 1 — demo_openmrg + - name: PostgreSQL uid: ds_demo_openmrg type: grafana-postgresql-datasource access: proxy + orgId: 1 url: database:5432 database: mydatabase user: demo_openmrg @@ -15,34 +26,8 @@ datasources: password: demo_openmrg_password jsonData: sslmode: disable - editable: true - - - name: demo_orange_cameroun - uid: ds_demo_orange_cameroun - type: grafana-postgresql-datasource - access: proxy - url: database:5432 - database: mydatabase - user: demo_orange_cameroun - secureJsonData: - password: demo_orange_cameroun_password - jsonData: - sslmode: disable - editable: true + isDefault: true + editable: false - # Admin datasource — connects as webserver_role. - # No RLS bypass: webserver_role sees all tenants' data and is intended for - # cross-tenant / operator dashboards only. Not assigned to individual users. - - name: admin - uid: ds_admin - type: grafana-postgresql-datasource - access: proxy - url: database:5432 - database: mydatabase - user: webserver_role - secureJsonData: - password: webserverpassword - jsonData: - sslmode: disable - isDefault: false - editable: true +# Datasource for org 2 (demo_orange_cameroun) is created via the Grafana API +# by the init_grafana service after org 2 is created (see grafana/init_grafana.py). diff --git a/scripts/update_dashboards_datasource.py b/scripts/update_dashboards_datasource.py new file mode 100644 index 0000000..cf19f43 --- /dev/null +++ b/scripts/update_dashboards_datasource.py @@ -0,0 +1,60 @@ +""" +One-shot script: add a hidden ${datasource} template variable to every provisioned +dashboard and replace the hardcoded "uid": "PostgreSQL" datasource references with +"uid": "${datasource}". 
+ +Usage: + python scripts/update_dashboards_datasource.py +""" + +import json +import glob + +DATASOURCE_VARIABLE = { + "current": {}, + "hide": 2, + "includeAll": False, + "multi": False, + "name": "datasource", + "options": [], + "query": "grafana-postgresql-datasource", + "refresh": 1, + "type": "datasource", + "label": "Datasource", +} + + +def replace_pg_uid(obj): + """Recursively replace uid='PostgreSQL' datasource refs with '${datasource}'.""" + if isinstance(obj, dict): + if ( + obj.get("type") == "grafana-postgresql-datasource" + and obj.get("uid") == "PostgreSQL" + ): + obj["uid"] = "${datasource}" + for v in obj.values(): + replace_pg_uid(v) + elif isinstance(obj, list): + for item in obj: + replace_pg_uid(item) + + +def add_datasource_var(d): + tmpl = d.setdefault("templating", {}) + var_list = tmpl.setdefault("list", []) + if any(v.get("name") == "datasource" for v in var_list): + return # already present + var_list.insert(0, DATASOURCE_VARIABLE) + + +pattern = "grafana/provisioning/dashboards/definitions/*.json" +for path in sorted(glob.glob(pattern)): + with open(path) as f: + d = json.load(f) + replace_pg_uid(d) + add_datasource_var(d) + with open(path, "w") as f: + json.dump(d, f, indent=4) + print("Updated {}".format(path)) + +print("Done.") diff --git a/webserver/configs/users.json b/webserver/configs/users.json index 65de9c3..e75c820 100644 --- a/webserver/configs/users.json +++ b/webserver/configs/users.json @@ -1,10 +1,12 @@ { "demo_openmrg": { "password_hash": "scrypt:32768:8:1$HLOwGuhFRtd4Dah3$f7dca30ff20c0da01f53569bf7396bdaec4bbd428ef1875f07791a857d57d8434c8fbfa67269453a0980769e1db6787ab46c01e8e33b57aa160d615db385a944", - "display_name": "OpenMRG Demo" + "display_name": "OpenMRG Demo", + "grafana_org_id": 1 }, "demo_orange_cameroun": { "password_hash": "scrypt:32768:8:1$EpwEQPpmJkYCDx4I$662c778d419645ac0f8be645b5af56543e1f47cfc5a4d33c261935db3750941d0ce9e107a5586415f112c5b31e57d85ada449d8c678a5de7169fce7854647a54", - "display_name": 
"Orange Cameroun Demo" + "display_name": "Orange Cameroun Demo", + "grafana_org_id": 2 } } \ No newline at end of file diff --git a/webserver/main.py b/webserver/main.py index 2fa9488..42f00bb 100644 --- a/webserver/main.py +++ b/webserver/main.py @@ -54,6 +54,7 @@ class User(UserMixin): def __init__(self, user_id: str): self.id = user_id self.display_name = USERS[user_id].get("display_name", user_id) + self.grafana_org_id = USERS[user_id].get("grafana_org_id", 1) @login_manager.user_loader @@ -399,6 +400,7 @@ def realtime(): map_html=map_html, cmls=cmls, selected_cml=default_cml, + grafana_org_id=current_user.grafana_org_id, ) @@ -638,7 +640,9 @@ def get_archive_statistics(user_id: str): def archive(): """Archive statistics page""" stats = get_archive_statistics(current_user.id) - return render_template("archive.html", stats=stats) + return render_template( + "archive.html", stats=stats, grafana_org_id=current_user.grafana_org_id + ) # ==================== DATA UPLOADS ROUTES ==================== diff --git a/webserver/templates/archive.html b/webserver/templates/archive.html index e350d35..34732c8 100644 --- a/webserver/templates/archive.html +++ b/webserver/templates/archive.html @@ -34,7 +34,7 @@
-
diff --git a/webserver/templates/realtime.html b/webserver/templates/realtime.html index feb21b2..3fc3d1f 100644 --- a/webserver/templates/realtime.html +++ b/webserver/templates/realtime.html @@ -102,7 +102,7 @@
From c0e7e5669eeb4cb045bc019dd338dc8827fb8fc1 Mon Sep 17 00:00:00 2001 From: Christian Chwala Date: Thu, 30 Apr 2026 10:50:12 +0200 Subject: [PATCH 5/6] docs: add multi-tenancy section to README with deployment-repo onboarding guide --- README.md | 137 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 137 insertions(+) diff --git a/README.md b/README.md index 06b0aa6..747a711 100644 --- a/README.md +++ b/README.md @@ -122,3 +122,140 @@ environment: - STORAGE_S3_ENDPOINT=http://minio:9000 ``` +## Multi-Tenancy + +Each tenant has: +- a PostgreSQL login role whose name **equals** the `user_id` stored in the data tables +- a Grafana organisation (org) with a dedicated datasource connecting as that role +- a Flask login account in `webserver/configs/users.json` + +Row-Level Security on `cml_metadata` and `cml_stats`, plus the +`cml_data_1h_secure` security-barrier view, ensure each DB role only reads its +own data without any application-level filtering. + +### Adding a new tenant from a deployment repo + +The canonical deployment pattern is a **separate git repo** that includes this +repo as a git submodule and overrides configuration with a +`docker-compose.override.yml`. + +#### 1. Database — add a migration in the deployment repo + +Create a SQL migration file (e.g. 
`migrations/008_add_acme.sql`): + +```sql +-- Idempotent: safe to re-run +DO $$ +BEGIN + IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'acme') THEN + CREATE ROLE acme LOGIN PASSWORD 'change-me-in-production'; + END IF; +END +$$; + +GRANT USAGE ON SCHEMA public TO acme; +GRANT SELECT, INSERT, UPDATE ON cml_data TO acme; +GRANT SELECT, INSERT, UPDATE ON cml_metadata TO acme; +GRANT SELECT, INSERT, UPDATE ON cml_stats TO acme; +GRANT EXECUTE ON FUNCTION update_cml_stats(TEXT, TEXT) TO acme; +GRANT SELECT ON cml_data_secure TO acme; +GRANT SELECT ON cml_data_1h_secure TO acme; +GRANT acme TO webserver_role; +``` + +Apply it to the running database: + +```sh +docker compose exec -T database psql -U myuser -d mydatabase \ + < migrations/008_add_acme.sql +``` + +No new RLS policies are needed; the generic `WHERE user_id = current_user` +policies cover every role automatically. + +#### 2. Grafana bootstrap — extend `init_grafana.py` via override + +In your deployment repo, create an override that replaces `ORGS` / `USERS` in +`grafana/init_grafana.py`, or mount a patched copy of the file. The simplest +approach is to extend via environment variables. 
Until `init_grafana.py` +supports env-driven tenant lists, the easiest override is to **replace the +script** with a deployment-repo copy that appends the new tenant: + +```python +# deployment-repo/grafana/init_grafana.py (copy of the upstream file + additions) + +ORGS = [ + {"id": 1, "name": "demo_openmrg"}, + {"id": 2, "name": "demo_orange_cameroun"}, + {"id": 3, "name": "acme"}, # ← new tenant +] + +USERS = [ + {"login": "demo_openmrg", "org_id": 1, "role": "Viewer"}, + {"login": "demo_orange_cameroun", "org_id": 2, "role": "Viewer"}, + {"login": "acme", "org_id": 3, "role": "Viewer"}, # ← new +] +``` + +And add the datasource + dashboard copy call in `__main__`: + +```python +create_datasource_for_org( + org_id=3, + name="PostgreSQL", + uid="ds_acme", + user="acme", + password="change-me-in-production", +) +copy_dashboards_to_org(target_org_id=3, source_org_id=1) +``` + +Mount the patched script via `docker-compose.override.yml`: + +```yaml +services: + init_grafana: + volumes: + - ./grafana/init_grafana.py:/app/init_grafana.py:ro +``` + +#### 3. Webserver users — mount an overridden `users.json` + +The deployment repo should provide its own `webserver/configs/users.json` +(already live-mounted, no rebuild needed): + +```json +{ + "demo_openmrg": { "password_hash": "scrypt:...", "display_name": "OpenMRG Demo", "grafana_org_id": 1 }, + "demo_orange_cameroun":{ "password_hash": "scrypt:...", "display_name": "Orange Cameroun Demo","grafana_org_id": 2 }, + "acme": { "password_hash": "scrypt:...", "display_name": "Acme Corp", "grafana_org_id": 3 } +} +``` + +Generate a password hash with: + +```sh +docker compose run --rm webserver python3 -c \ + "from werkzeug.security import generate_password_hash; print(generate_password_hash('your-password'))" +``` + +#### 4. SFTP keys — mount from the deployment repo + +The `sftp_receiver` reads authorised keys from `ssh_keys/authorized_keys` and +per-user key directories. 
Add the new tenant's public key there via the +deployment repo's volume mounts in `docker-compose.override.yml`. + +#### 5. Apply and restart + +```sh +# Apply the DB migration (only needed once per database volume lifetime) +docker compose exec -T database psql -U myuser -d mydatabase \ + < migrations/008_add_acme.sql + +# Restart so init_grafana re-runs bootstrap (creates org 3, datasource, copies dashboards) +docker compose restart init_grafana +# Or on a fresh stack: docker compose up -d +``` + +`init_grafana` is idempotent — re-running it on an existing stack is safe. + From 82c1f4f1e3803f095b0757827ddeff67d68ef447 Mon Sep 17 00:00:00 2001 From: Christian Chwala Date: Thu, 30 Apr 2026 14:07:06 +0200 Subject: [PATCH 6/6] refactor(ssh_keys): automate multi-tenant SSH key generation and authorized_keys setup - Refactored generate_ssh_keys.sh to loop over tenants, generate keys, and set up per-tenant authorized_keys - Ensured idempotency and clear output for onboarding new tenants - Updated authorized_keys for demo_openmrg and demo_orange_cameroun - Validated SFTP upload and stack health after changes This commit ensures robust, automated SSH key management for all tenants. 
--- ssh_keys/demo_openmrg/authorized_keys | 2 +- ssh_keys/demo_orange_cameroun/authorized_keys | 2 +- ssh_keys/generate_ssh_keys.sh | 99 ++++++++++--------- 3 files changed, 56 insertions(+), 47 deletions(-) diff --git a/ssh_keys/demo_openmrg/authorized_keys b/ssh_keys/demo_openmrg/authorized_keys index 382d5bc..e13a113 100644 --- a/ssh_keys/demo_openmrg/authorized_keys +++ b/ssh_keys/demo_openmrg/authorized_keys @@ -1 +1 @@ -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCvMZtEclPX0kKcyAqjDLYlKIPeHbaQqvhDP9aspS6PU5rXe2URu7V/84kfC7vEiEeELyCUuHqSq+qzC8mZCFx5fTL5EOQ4XodvTDFd7yBLGDiNVWZM17Kn1NBOc3vsMZ7Ro4tIx2QBnQFr/mNvzfxu95TaT0Ntgk1L8O/O4HlVm3ooGd34DBqCK+t9yZ/jBk2UqxPExTmDMm6gYBp0knRK6kveZb2Yc1CTHDlcnC028unH7OLayPhfGrCp05fiYwrScjmLt/OloVTN6V8KYR4el8koEgd7yGgJm4jxxU85bNNGqeQ/gX1llNAl53g4uq9W0+SbYWQmj4zqnElIRLUceVavwY5qA0kZMEyyfpOGXHvUgtKtqUZcPEqqAzBywgZuTktF8D0wuYoBRAYkWI0BV72bNtpsoxmNkqjRYBvmpboYu1dnQd4jOOFw6m/VDlDboAKiZVm2hiogwtmahLn+eAUwUM+L757KjNlaWCHeIQndiavRcQsxc2V387ic9oUpV9fVc9wqp08AGz8zmBiWtBoc0SEne+VBCZgjgRdcImmKgMvjD19dr2EvAyx6StxaLfjrcoGOOLvLY1+927K7TytE0PPvJPTdBTwkXFXu7FQLNou0kXkfO++Tr82wZbU9dEtVvZaczq/hzw98laG2qtIiH1NeIKRbIT3xAbLIiQ== demo_openmrg_client +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDljStaQ6VN+M31AW+qtR3pw3WdrTYdPAG/ulavmLrsceMUE71mTXHFf57GLOhrJZcwP+BRzrrNnn/+ZrZwHaWjPE82tdo8FoUA8JZFPU2BSbbUo2eo5R7vqNh5iH1lIo6ExwYEd39i9mNreq8ITqOi3HpqgMVTwwHlApTLfxcPEDa6raApDPnXoGgNaz2xlKzGcq/QBakOrPQLj/LcZyVnyO+tUJ49XPdchP87nW7yOHP98gDn0DcWnxMLLkeNG0eu15Y82j65qzpTTehRiqpxDm/JjFKstVEJ7+dhwZksIhlXzjyovD+JHtVhyFWlXpIIpz72GHuVuxWCYs8GOcMUvSD04osZZQHrHx6ZXqnkDqT+UpM6+HG7ATymBmF4DZV50Ah5aFKkJqDHWRCiCCQmQORb6a72vmeOcrKhOuNBUMVC8J7v9axwmt1KBGIQKW5LyKTZ74v+9yaILaPYA9LmIHRg2a9tmgQXlnvEte9mJSiIT5fEfAz+iAH+AbA1XNXH5hqVGrfFwlF175pbElXy+DLV/kPmt8Vmg9X38hOrw3v/ONkUvPEOxX2ZLl72qqyoSKClgHmt9dYj+IgAfhv7jaUzDYzq/GIBdE+GJGT0oy1oNAIBiWH/0ynGDRUOkioKX91lZgBPahP+dTk2b1uCWrbp8g/Ezejlw+MYEi8quw== demo_openmrg_client diff --git a/ssh_keys/demo_orange_cameroun/authorized_keys 
b/ssh_keys/demo_orange_cameroun/authorized_keys index e01fe69..392bb50 100644 --- a/ssh_keys/demo_orange_cameroun/authorized_keys +++ b/ssh_keys/demo_orange_cameroun/authorized_keys @@ -1 +1 @@ -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC4Nnv8SnkT/e+aOAp1+3dDIKY2/FNWtl6rdLYmlJdD2XxD0OEnZde4b9VQ/+9XfrNUqppjkk0LFzgKNxtQ5lObhWvhqs3Jll76NjCXMK9Bt46yvEfi+PB9KLrGxY/FOynssX9Fq/z2iYF+95+GdzJvAwE2YLzKrFYtLsg9Ut3CBH36oVAHl5oi+2iUAoIyCZxrYPadYB0tEAsyi8geqlue7u8FcgfjEwZs8d1FoMKNSH8rdLHJuQAxuJJPtlwSVrr4PggIlVCzPGN+uKqXYD9xavhddPoxFB1JVaAxlfkfdWbk8km8XS62B2RKfCECyCyEQ0aq5249/5WHnv6+QF8m/tOs1+jnT0UPsA6hdfqULYk2yP0H8k5QBQI0ge0zR/gCudgChZQFnfbOnhSWHK9FEUMcMtdTbT8I6WEr12vIVdqVFmcVuxe9zizWVneFAt/MDT6aeEFZYIdCVHGPI05XJb4Rwu1+wpafGR3P7GHjnZMZouA5P+wLoP77c5QwYpMGjNNsBKwtPbtOJma3ETAEM8nRBi494qE7K0URGaCAlNoIHR5hVoPydfTQ2oJDCCPh4tF+WhDCqgq8bHosDLhO1DJHU35u9T771BaFEmwcxP9BXrROfoEIrgvQ9JCtzTlHehYRnO2qEaXQ69a393zsnqcsdWNNrN8uetyXFe29dw== demo_orange_cameroun_client +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC09FfnYV7vRqnOGoWJMDhmh3oVcE+vqf1utsvIvxkdMEhV6Vi3SyTofKaSTOZ2JzfXl6gup2AZfd/1unHVDY0icy0gz/KERXn5v4Qe+6jAL0cpaCaKNSq9VuCsOGNiDXO0xN3+mP08QEYkeUz6kn/sPaOweeemaf3fsSiN7ePbwLZtHwAeIhHN8JZX+S8+p7RAbjLnres1vfna4alNU42YViY1gmOAKhxT/z2zlshwBKELa1i3gQTwCgE1z7Ki3uz5NpwLATV3zRSSWvMcZozFk+tizSoQCkKZ3PDuXTqN9I6QWBLrshyvcp73Anx+dJedm0267p7xGGoXNeILuY4vmu+dmgsEf7AuEVPeLuKT/AfT5ET5wWG+WB/i9bbQU0yX8v8dyvMjVNY0cKl7KmwHH7VAHccIMq2ATyfFxT2eph1ed7BUnl+nFfeYHbths9x+UERcXxFfJdBjDwJSov7BF+myinf6LBwsWAwcNM+dvPN2jAwxVFvCztP+q2pt6rmvVQ3bNVKhQhXkVNPLTqVsYXYubQQUrDMOz7bX5futTV2kYNnEdFblEDHS1ksKTn8pqEh2V/PHiim5FyDxm5I/wcjv5HPjLCjdPM82ZiyX7s+ISihlNpPfCtC0cnNkmU0xO7/ULv6scKLSST7pJcRv1o0EUv5gFVQjbedjdwjd7Q== demo_orange_cameroun_client diff --git a/ssh_keys/generate_ssh_keys.sh b/ssh_keys/generate_ssh_keys.sh index d21fa99..07e4652 100755 --- a/ssh_keys/generate_ssh_keys.sh +++ b/ssh_keys/generate_ssh_keys.sh @@ -4,14 +4,18 @@ set -e # SSH Keys Generation Script for SFTP Server # This script generates all required SSH keys for 
the SFTP server and MNO simulator -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}" )" && pwd)" cd "$SCRIPT_DIR" -echo "=== Generating SSH Keys for SFTP Server ===" +echo "=== Generating SSH Keys for SFTP Server (Multi-tenant) ===" echo -# Check if keys already exist -if [ -f "id_rsa" ] || [ -f "sftp_host_rsa_key" ]; then +# List of tenants (add new tenants here) +TENANTS=(demo_openmrg demo_orange_cameroun) + +# Remove all old keys if any exist +if ls id_rsa* sftp_host_*_key* 1>/dev/null 2>&1; then echo "⚠️ Warning: SSH keys already exist!" read -p "Do you want to regenerate them? This will overwrite existing keys. (y/N): " -n 1 -r echo @@ -20,46 +24,48 @@ if [ -f "id_rsa" ] || [ -f "sftp_host_rsa_key" ]; then exit 0 fi echo "Removing old keys..." - rm -f id_rsa id_rsa.pub id_rsa_orange_cameroun id_rsa_orange_cameroun.pub sftp_host_*_key* authorized_keys known_hosts + rm -f id_rsa* sftp_host_*_key* authorized_keys known_hosts + for t in "${TENANTS[@]}"; do + rm -f "$t/authorized_keys" + done fi -echo "1. Generating client key pair for demo_openmrg (OpenMRG simulator)..." -ssh-keygen -t rsa -b 4096 -f id_rsa -N "" -C "demo_openmrg_client" -echo "✓ Client key pair generated: id_rsa, id_rsa.pub" +# Generate client key pairs and authorized_keys for each tenant +for TENANT in "${TENANTS[@]}"; do + KEY_NAME="id_rsa_${TENANT}" + DIR_NAME="$TENANT" + mkdir -p "$DIR_NAME" + if [ ! -f "$KEY_NAME" ]; then + echo "Generating SSH key for $TENANT ..." + ssh-keygen -t rsa -b 4096 -f "$KEY_NAME" -N "" -C "${TENANT}_client" + else + echo "Key for $TENANT already exists, skipping generation." + fi + cp "${KEY_NAME}.pub" "$DIR_NAME/authorized_keys" + echo "✓ $TENANT: $KEY_NAME, $DIR_NAME/authorized_keys" +done echo -echo "1b. Generating client key pair for demo_orange_cameroun (Orange Cameroun simulator)..." 
-ssh-keygen -t rsa -b 4096 -f id_rsa_orange_cameroun -N "" -C "demo_orange_cameroun_client" -echo "✓ Client key pair generated: id_rsa_orange_cameroun, id_rsa_orange_cameroun.pub" -echo +# For backward compatibility: create id_rsa for demo_openmrg +if [ -f id_rsa_demo_openmrg ]; then + cp id_rsa_demo_openmrg id_rsa + cp id_rsa_demo_openmrg.pub id_rsa.pub +fi -echo "2. Generating SFTP server host keys..." -# Ed25519 (modern, recommended) +# Generate SFTP server host keys +echo "Generating SFTP server host keys..." ssh-keygen -t ed25519 -f sftp_host_ed25519_key -N "" -C "sftp_server_host" -echo "✓ Ed25519 host key generated" - -# RSA (for compatibility) ssh-keygen -t rsa -b 4096 -f sftp_host_rsa_key -N "" -C "sftp_server_host" -echo "✓ RSA host key generated" -echo - -echo "3. Creating authorized_keys files..." -cat id_rsa.pub > authorized_keys -# Per-user directories for the multi-user SFTP setup -mkdir -p demo_openmrg demo_orange_cameroun -cp id_rsa.pub demo_openmrg/authorized_keys -cp id_rsa_orange_cameroun.pub demo_orange_cameroun/authorized_keys -echo "✓ authorized_keys created" +echo "✓ SFTP server host keys generated" echo -echo "4. Creating known_hosts file..." -# For Docker internal network (sftp_receiver hostname) +# Create known_hosts file +echo "Creating known_hosts file..." { echo -n "sftp_receiver " cat sftp_host_ed25519_key.pub echo -n "sftp_receiver " cat sftp_host_rsa_key.pub - # Also add localhost entries for testing from host echo -n "[localhost]:2222 " cat sftp_host_ed25519_key.pub echo -n "[localhost]:2222 " @@ -68,25 +74,27 @@ echo "4. Creating known_hosts file..." echo "✓ known_hosts created" echo -echo "5. Setting proper file permissions..." 
-chmod 600 id_rsa id_rsa_orange_cameroun sftp_host_*_key -chmod 644 id_rsa.pub id_rsa_orange_cameroun.pub sftp_host_*_key.pub authorized_keys known_hosts +# Set permissions +chmod 600 id_rsa* sftp_host_*_key +chmod 644 id_rsa*.pub sftp_host_*_key.pub known_hosts +for TENANT in "${TENANTS[@]}"; do + chmod 644 "$TENANT/authorized_keys" +done echo "✓ Permissions set" echo echo "=== SSH Keys Generated Successfully ===" +echo "Tenants: ${TENANTS[*]}" echo echo "Generated files:" -echo " - id_rsa (demo_openmrg private key)" -echo " - id_rsa.pub (demo_openmrg public key)" -echo " - id_rsa_orange_cameroun (demo_orange_cameroun private key)" -echo " - id_rsa_orange_cameroun.pub (demo_orange_cameroun public key)" -echo " - sftp_host_ed25519_key (SFTP server Ed25519 private key)" -echo " - sftp_host_ed25519_key.pub (SFTP server Ed25519 public key)" -echo " - sftp_host_rsa_key (SFTP server RSA private key)" -echo " - sftp_host_rsa_key.pub (SFTP server RSA public key)" -echo " - authorized_keys (Authorized client keys)" -echo " - known_hosts (Known SFTP server host keys)" +for TENANT in "${TENANTS[@]}"; do + echo " - id_rsa_${TENANT} (private key)" + echo " - id_rsa_${TENANT}.pub (public key)" + echo " - $TENANT/authorized_keys (authorized_keys for $TENANT)" +done +echo " - sftp_host_ed25519_key / .pub (SFTP server Ed25519 key)" +echo " - sftp_host_rsa_key / .pub (SFTP server RSA key)" +echo " - known_hosts (Known SFTP server host keys)" echo echo "⚠️ SECURITY WARNING:" echo " These keys are for DEVELOPMENT/TESTING only!" @@ -95,6 +103,7 @@ echo " DO NOT commit private keys to version control." echo echo "Next steps:" echo " 1. Start the services: docker compose up -d" -echo " 2. Test SFTP (OpenMRG): sftp -P 2222 -i ssh_keys/id_rsa demo_openmrg@localhost" -echo " 3. Test SFTP (Orange Cameroun): sftp -P 2222 -i ssh_keys/id_rsa_orange_cameroun demo_orange_cameroun@localhost" +for TENANT in "${TENANTS[@]}"; do + echo " 2. 
Test SFTP ($TENANT): sftp -P 2222 -i ssh_keys/id_rsa_${TENANT} $TENANT@localhost" +done echo