From 2331b7ac3e6006fc530507a70ebee81212a009a0 Mon Sep 17 00:00:00 2001 From: Somanath Date: Tue, 12 May 2026 13:58:28 +0530 Subject: [PATCH 1/3] refactor: reduce cyclomatic complexity across Python scripts --- python/cifs_provision.py | 398 ++++++++++++++++++---------------- python/cluster_setup_basic.py | 34 ++- 2 files changed, 241 insertions(+), 191 deletions(-) diff --git a/python/cifs_provision.py b/python/cifs_provision.py index cd7ecf8..eca207e 100644 --- a/python/cifs_provision.py +++ b/python/cifs_provision.py @@ -52,7 +52,7 @@ "SVM_NAME": "vs1", "VOLUME_NAME": "vol_002", "VOLUME_SIZE": "100MB", - "AGGR_NAME": "sti232_vsim_sr091o_aggr1", # required — set via --aggregate or AGGR_NAME env var + "AGGR_NAME": "", # required — set via --aggregate or AGGR_NAME env var "CLIENT_MATCH": "0.0.0.0/0", # required — set via --client-match or CLIENT_MATCH env var "SHARE_NAME": "cifs_share_demo", "SHARE_COMMENT": "Provisioned by orchestrio", @@ -118,215 +118,233 @@ def parse_args() -> argparse.Namespace: return p.parse_args() -def main() -> None: - args = parse_args() +def _pick(cli_val: str | None, env_key: str, default: str = "") -> str: + """Return the first non-empty value from: CLI arg, env var, ENV dict, or default.""" + return cli_val or os.environ.get(env_key) or ENV.get(env_key, "") or default + - # Load env file first so its values can be read via os.environ below +def _resolve_config(args: argparse.Namespace) -> dict[str, str | bool]: + """Load env file and CLI args, then return the resolved configuration dict.""" if args.env_file: _load_env_file(args.env_file) - # Push ENV block values into os.environ so OntapClient.from_env() picks them up for key, value in ENV.items(): if value and key not in os.environ: os.environ[key] = value - # Resolve each value: CLI arg > env var > ENV block > built-in default (matches YAML priority) - svm = args.svm or os.environ.get("SVM_NAME") or ENV["SVM_NAME"] or "vs0" - volume = args.volume or os.environ.get("VOLUME_NAME") or ENV["VOLUME_NAME"] or "cifs_test_env" - size = args.size or os.environ.get("VOLUME_SIZE") or ENV["VOLUME_SIZE"] or "100MB" - aggregate = args.aggregate or os.environ.get("AGGR_NAME") or ENV["AGGR_NAME"] or "" - share_name = ( - args.share_name or os.environ.get("SHARE_NAME") or ENV["SHARE_NAME"] or "cifs_share_demo" + aggregate = _pick(args.aggregate, "AGGR_NAME") + if not aggregate: + logger.error("--aggregate is required (or set AGGR_NAME in env / --env-file)") + sys.exit(1) + + return { + "svm": _pick(args.svm, "SVM_NAME", "vs0"), + "volume": _pick(args.volume, "VOLUME_NAME", "cifs_test_env"), + "size": _pick(args.size, "VOLUME_SIZE", "100MB"), + "aggregate": aggregate, + "share_name": _pick(args.share_name, "SHARE_NAME", "cifs_share_demo"), + "share_comment": _pick(args.share_comment, "SHARE_COMMENT", "Provisioned by orchestrio"), + "acl_user": _pick(args.acl_user, "ACL_USER", "Everyone"), + "acl_permission": _pick(args.acl_permission, "ACL_PERMISSION", "full_control"), + "create_cifs_server": args.create_cifs_server, + "cifs_server_name": _pick(args.cifs_server_name, "CIFS_SERVER_NAME", "ONTAP-CIFS"), + "workgroup": _pick(args.workgroup, "CIFS_WORKGROUP", "WORKGROUP"), + } + + +def _ensure_cifs_server( + client: OntapClient, + svm: str, + create_cifs_server: bool, + cifs_server_name: str, + workgroup: str, +) -> None: + """Verify a CIFS server exists on the SVM, optionally creating one if missing.""" + cifs_svc_resp = client.get( + "/protocols/cifs/services", + fields="svm.name,enabled", + **{"svm.name": svm}, + ) + if 
cifs_svc_resp.get("num_records", 0) > 0: + logger.info("CIFS server confirmed on SVM '%s'", svm) + return + + if not create_cifs_server: + logger.error( + "ABORTED - no CIFS server found on SVM '%s'. " + "Pass --create-cifs-server to create one automatically, or use " + "'vserver cifs create' before running this script.", + svm, + ) + sys.exit(1) + + logger.info( + "No CIFS server on SVM '%s' - creating workgroup server '%s' in workgroup '%s'...", + svm, + cifs_server_name, + workgroup, ) - share_comment = ( - args.share_comment - or os.environ.get("SHARE_COMMENT") - or ENV["SHARE_COMMENT"] - or "Provisioned by orchestrio" + resp = client.post( + "/protocols/cifs/services", + body={ + "svm": {"name": svm}, + "name": cifs_server_name, + "workgroup": workgroup, + "enabled": True, + }, ) - acl_user = args.acl_user or os.environ.get("ACL_USER") or ENV["ACL_USER"] or "Everyone" - acl_permission = ( - args.acl_permission - or os.environ.get("ACL_PERMISSION") - or ENV["ACL_PERMISSION"] - or "full_control" + if resp.get("job"): + client.poll_job(resp["job"]["uuid"]) + logger.info( + "CIFS server '%s' created in workgroup '%s' on SVM '%s'", + cifs_server_name, + workgroup, + svm, ) - create_cifs_server = args.create_cifs_server - cifs_server_name = ( - args.cifs_server_name - or os.environ.get("CIFS_SERVER_NAME") - or ENV["CIFS_SERVER_NAME"] - or "ONTAP-CIFS" + +def _ensure_volume_ntfs( + client: OntapClient, svm: str, volume: str, size: str, aggregate: str +) -> dict: + """Create the FlexVol (NTFS security style) if it does not exist. Returns the job result.""" + existing = client.get( + "/storage/volumes", + fields="name,uuid", + name=volume, + **{"svm.name": svm}, + ) + if existing.get("records"): + logger.info("Volume '%s' already exists - skipping create", volume) + return {"state": "skipped", "message": "volume already existed"} + + logger.info("Creating volume '%s' (%s) on SVM '%s'...", volume, size, svm) + resp = client.post( + "/storage/volumes", + body={ + "name": volume, + "svm": {"name": svm}, + "aggregates": [{"name": aggregate}], + "size": size, + "nas": { + "security_style": "ntfs", + "path": f"/{volume}", + }, + }, ) - workgroup = ( - args.workgroup or os.environ.get("CIFS_WORKGROUP") or ENV["CIFS_WORKGROUP"] or "WORKGROUP" + job_uuid = resp["job"]["uuid"] + logger.info("Volume creation job: %s", job_uuid) + return client.poll_job(job_uuid) + + +def _get_svm_uuid(client: OntapClient, svm: str) -> str: + """Fetch and return the UUID for the named SVM.""" + resp = client.get("/svm/svms", fields="name,uuid", name=svm) + return resp["records"][0]["uuid"] + + +def _ensure_cifs_share( + client: OntapClient, + svm_uuid: str, + share_name: str, + volume: str, + svm: str, + share_comment: str, +) -> None: + """Create the CIFS share if it does not already exist.""" + try: + existing = client.get( + f"/protocols/cifs/shares/{svm_uuid}/{share_name}", + fields="name", + ) + share_exists = bool(existing.get("name")) + except OntapApiError as exc: + if exc.status_code == 404: + share_exists = False + else: + raise + + if share_exists: + logger.info("CIFS share '%s' already exists - skipping create", share_name) + return + + logger.info("Creating CIFS share '%s' on path '/%s'...", share_name, volume) + client.post( + "/protocols/cifs/shares", + body={ + "name": share_name, + "path": f"/{volume}", + "svm": {"name": svm}, + "comment": share_comment, + }, ) - if not aggregate: - logger.error("--aggregate is required (or set AGGR_NAME in env / --env-file)") - sys.exit(1) - with OntapClient.from_env() 
as client: - # Pre-flight — verify CIFS server is enabled on the SVM - # A CIFS share cannot be created if no CIFS server exists on the SVM. - # Exits early with a clear error rather than failing mid-workflow. - cifs_svc_resp = client.get( - "/protocols/cifs/services", - fields="svm.name,enabled", - **{"svm.name": svm}, +def _set_share_acl( + client: OntapClient, + svm_uuid: str, + share_name: str, + acl_user: str, + acl_permission: str, +) -> None: + """Patch the share ACL entry for the given user with the specified permission.""" + logger.info("Setting ACL: %s -> %s...", acl_user, acl_permission) + client.patch( + f"/protocols/cifs/shares/{svm_uuid}/{share_name}/acls/{acl_user}/windows", + body={"permission": acl_permission}, + ) + + +def _verify_and_log_acls(client: OntapClient, svm_uuid: str, share_name: str) -> None: + """Fetch the share and log each ACL entry for confirmation.""" + logger.info("Verifying share '%s'...", share_name) + resp = client.get( + f"/protocols/cifs/shares/{svm_uuid}/{share_name}", + fields="name,path,acls", + ) + for acl in resp.get("acls", []): + logger.info( + " ACL: %s (%s) -> %s", + acl.get("user_or_group", "N/A"), + acl.get("type", "N/A"), + acl.get("permission", "N/A"), ) - if cifs_svc_resp.get("num_records", 0) == 0: - if not create_cifs_server: - logger.error( - "ABORTED — no CIFS server found on SVM '%s'. " - "Pass --create-cifs-server to create one automatically, or use " - "'vserver cifs create' before running this script.", - svm, - ) - sys.exit(1) - logger.info( - "No CIFS server on SVM '%s' — creating workgroup server '%s' in workgroup '%s'…", - svm, - cifs_server_name, - workgroup, - ) - cifs_create_resp = client.post( - "/protocols/cifs/services", - body={ - "svm": {"name": svm}, - "name": cifs_server_name, - "workgroup": workgroup, - "enabled": True, - }, - ) - # ONTAP may return an async job for CIFS server creation - if cifs_create_resp.get("job"): - cifs_job_uuid = cifs_create_resp["job"]["uuid"] - logger.info("CIFS server creation job: %s", cifs_job_uuid) - client.poll_job(cifs_job_uuid) - logger.info( - "CIFS server '%s' created in workgroup '%s' on SVM '%s'", - cifs_server_name, - workgroup, - svm, - ) - else: - logger.info("CIFS server confirmed on SVM '%s'", svm) - - # Step 1 — create volume with NTFS security style (idempotent: skip if exists) - # POST /storage/volumes to create a FlexVol with security_style=ntfs. - # NTFS security style is required for CIFS/SMB share ACL enforcement. 
- existing_vol = client.get( - "/storage/volumes", - fields="name,uuid", - name=volume, - **{"svm.name": svm}, + + +def main() -> None: + cfg = _resolve_config(parse_args()) + svm = cfg["svm"] + volume = cfg["volume"] + size = cfg["size"] + aggregate = cfg["aggregate"] + share_name = cfg["share_name"] + share_comment = cfg["share_comment"] + acl_user = cfg["acl_user"] + acl_permission = cfg["acl_permission"] + + with OntapClient.from_env() as client: + _ensure_cifs_server( + client, svm, cfg["create_cifs_server"], cfg["cifs_server_name"], cfg["workgroup"] ) - if existing_vol.get("records"): - logger.info("Volume '%s' already exists — skipping create", volume) - job_result = {"state": "skipped", "message": "volume already existed"} - else: - logger.info("Creating volume '%s' (%s) on SVM '%s'…", volume, size, svm) - create_resp = client.post( - "/storage/volumes", - body={ - "name": volume, - "svm": {"name": svm}, - "aggregates": [{"name": aggregate}], - "size": size, - "nas": { - "security_style": "ntfs", - "path": f"/{volume}", - }, - }, - ) - - # Step 2 — poll volume-creation job - # Block until the async job finishes; the job result is logged in Step 3. - job_uuid = create_resp["job"]["uuid"] - logger.info("Volume creation job: %s", job_uuid) - job_result = client.poll_job(job_uuid) - - # Step 3 — print volume creation status - # Log the final job state and message for confirmation before continuing. + + job_result = _ensure_volume_ntfs(client, svm, volume, size, aggregate) state = job_result.get("state", "unknown") message = job_result.get("message", "") - logger.info("Volume '%s' job → %s: %s", volume, state, message) - - # Step 4 — create CIFS share (idempotent: skip if already exists) - # POST /protocols/cifs/shares to create the share pointing at the volume junction. - # ONTAP auto-creates a default 'Everyone / Full Control' ACL entry on creation. - svm_resp = client.get( - "/svm/svms", - fields="name,uuid", - name=svm, - ) - svm_uuid = svm_resp["records"][0]["uuid"] - - try: - existing_share = client.get( - f"/protocols/cifs/shares/{svm_uuid}/{share_name}", - fields="name", - ) - share_exists = bool(existing_share.get("name")) - except OntapApiError as exc: - if exc.status_code == 404: - share_exists = False - else: - raise - if share_exists: - logger.info("CIFS share '%s' already exists — skipping create", share_name) - else: - logger.info("Creating CIFS share '%s' on path '/%s'…", share_name, volume) - client.post( - "/protocols/cifs/shares", - body={ - "name": share_name, - "path": f"/{volume}", - "svm": {"name": svm}, - "comment": share_comment, - }, - ) - - # Step 6 — set share ACL (PATCH the auto-created Everyone entry) - # svm_uuid was resolved in Step 4 above (needed for the ACL URL). - # PATCH replaces the permission on the existing ACL entry for the given user. - # Default is 'Everyone' with 'full_control'; customise via ACL_USER/ACL_PERMISSION. - logger.info("Setting ACL: %s → %s…", acl_user, acl_permission) - client.patch( - f"/protocols/cifs/shares/{svm_uuid}/{share_name}/acls/{acl_user}/windows", - body={"permission": acl_permission}, - ) - - # Step 7 — verify share and ACL - # GET the share and inspect the acls array to confirm the permission was applied. - # Logs each ACL entry (user, type, permission) for visual confirmation. 
- logger.info("Verifying share '%s'…", share_name) - verify_resp = client.get( - f"/protocols/cifs/shares/{svm_uuid}/{share_name}", - fields="name,path,acls", - ) - acls = verify_resp.get("acls", []) - for acl in acls: - logger.info( - " ACL: %s (%s) → %s", - acl.get("user_or_group", "—"), - acl.get("type", "—"), - acl.get("permission", "—"), - ) - - # Step 8 — print summary - # Log a single success line with share name, volume, SVM, path, and ACL. - logger.info( - "✓ CIFS share '%s' created on volume '%s' (SVM: %s) | Path: /%s | ACL: %s → %s", - share_name, - volume, - svm, - volume, - acl_user, - acl_permission, - ) + logger.info("Volume '%s' job -> %s: %s", volume, state, message) + + svm_uuid = _get_svm_uuid(client, svm) + _ensure_cifs_share(client, svm_uuid, share_name, volume, svm, share_comment) + _set_share_acl(client, svm_uuid, share_name, acl_user, acl_permission) + _verify_and_log_acls(client, svm_uuid, share_name) + + logger.info( + "[OK] CIFS share '%s' on volume '%s' (SVM: %s) | Path: /%s | ACL: %s -> %s", + share_name, + volume, + svm, + volume, + acl_user, + acl_permission, + ) if __name__ == "__main__": diff --git a/python/cluster_setup_basic.py b/python/cluster_setup_basic.py index 757364e..1945bdb 100644 --- a/python/cluster_setup_basic.py +++ b/python/cluster_setup_basic.py @@ -5,7 +5,6 @@ """Create a storage cluster from two pre-cluster nodes. -Equivalent to: orchestrio run yaml-workflows/workflows/cluster_setup_basic.yaml Steps: 1. discover_nodes — GET /api/cluster/nodes (membership=available, retry 3x/30s) @@ -16,6 +15,7 @@ Usage:: + # env vars directly export ONTAP_HOST=10.x.x.x # pre-cluster node IP export ONTAP_USER=admin # usually admin, empty pass on pre-cluster nodes export ONTAP_PASS= @@ -26,14 +26,17 @@ export CLUSTER_GATEWAY=10.x.x.1 export PARTNER_MGMT_IP=10.x.x.y python cluster_setup_basic.py + """ from __future__ import annotations +import argparse import logging import os import sys import time +from pathlib import Path from ontap_client import OntapClient @@ -263,7 +266,36 @@ def main() -> None: ) +def _load_env_file(path: str) -> None: + """Load KEY=VALUE pairs from a .env file into the INPUTS dict.""" + for line in Path(path).read_text().splitlines(): + line = line.strip() + if not line or line.startswith("#") or "=" not in line: + continue + key, _, value = line.partition("=") + INPUTS[key.strip()] = value.strip().strip('"').strip("'") + + if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Create an ONTAP cluster from two pre-cluster nodes." 
+ ) + parser.add_argument( + "--env-file", + metavar="FILE", + help="Path to a .env file with KEY=VALUE pairs (one per build, like -ir in ha_create.exp).", + ) + args = parser.parse_args() + + if args.env_file: + _load_env_file(args.env_file) + + # env vars always win over INPUTS block defaults + for key in list(INPUTS): + val = os.environ.get(key) + if val: + INPUTS[key] = val + try: main() except KeyboardInterrupt: From 6676a3c9509ce1fd119d96a9ef2a230acdb0549a Mon Sep 17 00:00:00 2001 From: Somanath Date: Tue, 12 May 2026 16:45:57 +0530 Subject: [PATCH 2/3] chore: remove unnecessary inline comments for production readiness - Remove phase narration comment blocks above logger.info phase headers - Remove redundant _env() inline doc comment across all scripts - Remove stale section decorators (#--- banners for Steps/Main) - Remove garbled mojibake chars from ENV dict inline comments (nfs_provision) - Fix bug in snapmirror_test_failover.py: logger.info() calls for phases A/B/C were embedded inside comment text and never executed - Preserve USER INPUTS banners, Steps banners, and version-specific field-set comments that carry non-obvious semantic meaning All scripts pass ruff check + ruff format --check. --- python/cifs_provision.py | 9 - python/cluster_info.py | 14 +- python/cluster_setup_basic.py | 18 +- python/nfs_provision.py | 280 ++++++------- python/snapmirror_cleanup_test_failover.py | 259 ++++++------ python/snapmirror_provision_dest_managed.py | 424 +++++++++++--------- python/snapmirror_provision_src_managed.py | 23 -- python/snapmirror_test_failover.py | 24 +- 8 files changed, 504 insertions(+), 547 deletions(-) diff --git a/python/cifs_provision.py b/python/cifs_provision.py index eca207e..4dfe231 100644 --- a/python/cifs_provision.py +++ b/python/cifs_provision.py @@ -19,12 +19,6 @@ Usage:: python cifs_provision.py - - # Or override values via CLI args: - python cifs_provision.py --svm vs0 --volume cifs_test_env --aggregate aggr1 - - # Or supply all values via an env file (same as YAML --env-file): - python cifs_provision.py --env-file cifs-provision.env """ from __future__ import annotations @@ -43,8 +37,6 @@ ) logger = logging.getLogger(__name__) -# ── Inputs (edit these directly, same as the YAML env: block) ──────────────── -# These are the defaults. CLI args and env vars override them. ENV = { "ONTAP_HOST": "", # cluster management IP — set here or via ONTAP_HOST env var "ONTAP_USER": "admin", @@ -61,7 +53,6 @@ "CIFS_SERVER_NAME": "ONTAP-CIFS", "CIFS_WORKGROUP": "WORKGROUP", } -# ───────────────────────────────────────────────────────────────────────────── def _load_env_file(path: str) -> None: diff --git a/python/cluster_info.py b/python/cluster_info.py index 787ecad..a93a215 100644 --- a/python/cluster_info.py +++ b/python/cluster_info.py @@ -6,8 +6,8 @@ """Retrieve storage cluster version and list all nodes with serial numbers. Steps: - 1. GET /cluster ΓÇö retrieve cluster name and ONTAP version - 2. GET /cluster/nodes ΓÇö list all nodes with serial numbers + 1. GET /cluster — retrieve cluster name and ONTAP version + 2. 
GET /cluster/nodes — list all nodes with serial numbers Prerequisites:: @@ -35,15 +35,15 @@ def main() -> None: with OntapClient.from_env() as client: - # Step 1 ΓÇö cluster version + # Step 1 — cluster version cluster = client.get("/cluster", fields="version") logger.info( - "Cluster: %s ΓÇö ONTAP %s", + "Cluster: %s — ONTAP %s", cluster.get("name", "unknown"), cluster.get("version", {}).get("full", "unknown"), ) - # Step 2 ΓÇö node list with serial numbers + # Step 2 — node list with serial numbers nodes_resp = client.get("/cluster/nodes", fields="name,serial_number") records = nodes_resp.get("records", []) logger.info("Nodes in cluster: %d", nodes_resp.get("num_records", len(records))) @@ -51,8 +51,8 @@ def main() -> None: for node in records: logger.info( " %-30s serial: %s", - node.get("name", "ΓÇö"), - node.get("serial_number", "ΓÇö"), + node.get("name", "—"), + node.get("serial_number", "—"), ) diff --git a/python/cluster_setup_basic.py b/python/cluster_setup_basic.py index 1945bdb..3c7c250 100644 --- a/python/cluster_setup_basic.py +++ b/python/cluster_setup_basic.py @@ -2,10 +2,7 @@ # © 2026 NetApp, Inc. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 # See the NOTICE file in the repo root for trademark and attribution details. - """Create a storage cluster from two pre-cluster nodes. - - Steps: 1. discover_nodes — GET /api/cluster/nodes (membership=available, retry 3x/30s) 2. discover_local — isolate the local node (has management_interfaces != null) @@ -52,11 +49,11 @@ INPUTS = { "ONTAP_HOST": "", # Node 1 management IP — set via ONTAP_HOST env var "ONTAP_USER": "admin", - "ONTAP_PASS": "", # set via ONTAP_PASS env var — leave empty for pre-cluster nodes - "CLUSTER_NAME": "", # choose your cluster name — set via CLUSTER_NAME env var + "ONTAP_PASS": "", # leave empty for pre-cluster nodes + "CLUSTER_NAME": "", # set via CLUSTER_NAME env var "CLUSTER_PASS": "", # set via CLUSTER_PASS env var — choose your cluster admin password "CLUSTER_MGMT_IP": "", # cluster management IP — set via CLUSTER_MGMT_IP env var - "CLUSTER_NETMASK": "", # e.g. 255.255.255.0 — set via CLUSTER_NETMASK env var + "CLUSTER_NETMASK": "", # set via CLUSTER_NETMASK env var "CLUSTER_GATEWAY": "", # default gateway — set via CLUSTER_GATEWAY env var "PARTNER_MGMT_IP": "", # Node 2 management IP — set via PARTNER_MGMT_IP env var } @@ -82,7 +79,6 @@ def _env(key: str, required: bool = True) -> str: - # Prefer value from INPUTS dict; fall back to environment variable. 
val = INPUTS.get(key) or os.environ.get(key, "") if required and not val: logger.error( @@ -236,11 +232,6 @@ def track_job(client: OntapClient, job_uuid: str) -> dict: time.sleep(10) -# --------------------------------------------------------------------------- -# Main -# --------------------------------------------------------------------------- - - def main() -> None: host = _env("ONTAP_HOST") user = _env("ONTAP_USER") @@ -268,7 +259,7 @@ def main() -> None: def _load_env_file(path: str) -> None: """Load KEY=VALUE pairs from a .env file into the INPUTS dict.""" - for line in Path(path).read_text().splitlines(): + for line in Path(path).read_text(encoding="utf-8").splitlines(): line = line.strip() if not line or line.startswith("#") or "=" not in line: continue @@ -290,7 +281,6 @@ def _load_env_file(path: str) -> None: if args.env_file: _load_env_file(args.env_file) - # env vars always win over INPUTS block defaults for key in list(INPUTS): val = os.environ.get(key) if val: diff --git a/python/nfs_provision.py b/python/nfs_provision.py index 7a2ba31..b8b6d13 100644 --- a/python/nfs_provision.py +++ b/python/nfs_provision.py @@ -26,16 +26,15 @@ --aggregate aggr1 \\ --client-match 0.0.0.0/0 - # Or supply all values via an env file (same as YAML --env-file): + # Or supply all values via an env file: python nfs_provision.py --env-file nfs-provision.env Default values (vs0, vol_nfs_test_01, 0.0.0.0/0, etc.) are for illustration -only. Replace them with values appropriate for your environment ΓÇö +only. Replace them with values appropriate for your environment — in particular, restrict ``--client-match`` to your actual client subnet. -This script is *not* idempotent: running it twice with the same volume name -will fail. See ``python/README.md`` ΓåÆ "Adapting for Your Environment" for -guidance on adding existence checks. +This script is idempotent: re-running with the same parameters skips steps +that are already complete (volume exists, export policy exists, rule exists). """ from __future__ import annotations @@ -54,19 +53,16 @@ ) logger = logging.getLogger(__name__) -# ΓöÇΓöÇ Inputs (edit these directly, same as the YAML env: block) ΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇ -# These are the defaults. CLI args and env vars override them. 
ENV = { - "ONTAP_HOST": "", # cluster management IP ΓÇö set here or via ONTAP_HOST env var + "ONTAP_HOST": "", # cluster management IP — set here or via ONTAP_HOST env var "ONTAP_USER": "admin", - "ONTAP_PASS": "", # never hardcode ΓÇö set via ONTAP_PASS env var + "ONTAP_PASS": "", # never hardcode — set via ONTAP_PASS env var "SVM_NAME": "vs1", "VOLUME_NAME": "vol_001", "VOLUME_SIZE": "100MB", - "AGGR_NAME": "sti232_vsim_sr091o_aggr1", # required ΓÇö set via --aggregate or AGGR_NAME env var + "AGGR_NAME": "sti232_vsim_sr091o_aggr1", # required — set via --aggregate or AGGR_NAME env var "CLIENT_MATCH": "0.0.0.0/0", } -# ΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇΓöÇ def _load_env_file(path: str) -> None: @@ -102,160 +98,144 @@ def parse_args() -> argparse.Namespace: return p.parse_args() -def main() -> None: - args = parse_args() +def _pick(arg: str | None, env_key: str, default: str = "") -> str: + """Return first non-empty value from: CLI arg -> env var -> ENV block -> default.""" + return arg or os.environ.get(env_key) or ENV.get(env_key, "") or default - # Load env file first so its values can be read via os.environ below - if args.env_file: - _load_env_file(args.env_file) - # Push ENV block values into os.environ so OntapClient.from_env() picks them up +def _resolve_config(args: argparse.Namespace) -> tuple[str, str, str, str, str]: + """Push ENV defaults into os.environ then resolve final values from all sources.""" for key, value in ENV.items(): if value and key not in os.environ: os.environ[key] = value + svm = _pick(args.svm, "SVM_NAME", "vs0") + volume = _pick(args.volume, "VOLUME_NAME", "vol_nfs_test_01") + size = _pick(args.size, "VOLUME_SIZE", "100MB") + aggregate = _pick(args.aggregate, "AGGR_NAME") + client_match = _pick(args.client_match, "CLIENT_MATCH", "0.0.0.0/0") + return svm, volume, size, aggregate, client_match + + +def _ensure_volume(client: OntapClient, svm: str, volume: str, size: str, aggregate: str) -> str: + """Create a FlexVol if it does not exist; return its UUID.""" + existing = client.get( + "/storage/volumes", + fields="name,uuid", + name=volume, + **{"svm.name": svm}, + ) + if existing.get("records"): + logger.info("Volume '%s' already exists -- skipping create", volume) + else: + logger.info("Creating volume '%s' (%s) on SVM '%s'...", volume, size, svm) + create_resp = client.post( + "/storage/volumes", + body={ + "name": volume, + "svm": {"name": svm}, + "aggregates": [{"name": aggregate}], + "size": size, + "nas": {"path": f"/{volume}"}, + }, + ) + job_uuid = create_resp["job"]["uuid"] + logger.info("Volume creation job: %s", job_uuid) + client.poll_job(job_uuid) + logger.info("Volume '%s' created successfully", volume) + + vol_resp = client.get( + "/storage/volumes", + fields="name,uuid", + name=volume, + **{"svm.name": svm}, + ) + if not vol_resp.get("records"): + raise RuntimeError(f"Volume '{volume}' not found on SVM '{svm}' after creation") + return vol_resp["records"][0]["uuid"] + + +def _ensure_export_policy(client: OntapClient, svm: str, policy_name: str) -> int: + """Create an NFS export policy if it does not exist; return its numeric ID.""" + existing = client.get( + "/protocols/nfs/export-policies", + fields="name,id", + name=policy_name, + **{"svm.name": svm}, + ) + if existing.get("records"): + logger.info("Export policy '%s' already exists -- skipping 
create", policy_name) + else: + logger.info("Creating export policy '%s'...", policy_name) + client.post( + "/protocols/nfs/export-policies", + body={"name": policy_name, "svm": {"name": svm}}, + ) + policy_resp = client.get( + "/protocols/nfs/export-policies", + fields="name,id", + name=policy_name, + **{"svm.name": svm}, + ) + if not policy_resp.get("records"): + raise RuntimeError( + f"Export policy '{policy_name}' not found on SVM '{svm}' after creation" + ) + return policy_resp["records"][0]["id"] - # Resolve each value: CLI arg > env var > ENV block > built-in default (matches YAML priority) - svm = args.svm or os.environ.get("SVM_NAME") or ENV["SVM_NAME"] or "vs0" - volume = ( - args.volume or os.environ.get("VOLUME_NAME") or ENV["VOLUME_NAME"] or "vol_nfs_test_01" + +def _ensure_client_rule(client: OntapClient, policy_id: int, client_match: str) -> None: + """Add a client-match rule to an export policy if one does not already exist.""" + existing_rules = client.get( + f"/protocols/nfs/export-policies/{policy_id}/rules", + fields="index,clients", + ) + rule_exists = any( + any(c.get("match") == client_match for c in r.get("clients", [])) + for r in existing_rules.get("records", []) ) - size = args.size or os.environ.get("VOLUME_SIZE") or ENV["VOLUME_SIZE"] or "100MB" - aggregate = args.aggregate or os.environ.get("AGGR_NAME") or ENV["AGGR_NAME"] or "" - client_match = ( - args.client_match or os.environ.get("CLIENT_MATCH") or ENV["CLIENT_MATCH"] or "0.0.0.0/0" + if rule_exists: + logger.info("Client rule '%s' already exists in policy -- skipping", client_match) + return + logger.info("Adding client rule '%s' to policy...", client_match) + client.post( + f"/protocols/nfs/export-policies/{policy_id}/rules", + body={ + "clients": [{"match": client_match}], + "ro_rule": ["any"], + "rw_rule": ["any"], + "superuser": ["any"], + }, ) + +def _assign_policy(client: OntapClient, volume_uuid: str, policy_name: str) -> None: + """Assign an NFS export policy to a volume; polls async job if returned.""" + logger.info("Assigning export policy to volume...") + patch_resp = client.patch( + f"/storage/volumes/{volume_uuid}", + body={"nas": {"export_policy": {"name": policy_name}}}, + ) + if "job" in patch_resp: + client.poll_job(patch_resp["job"]["uuid"]) + + +def main() -> None: + args = parse_args() + if args.env_file: + _load_env_file(args.env_file) + svm, volume, size, aggregate, client_match = _resolve_config(args) if not aggregate: logger.error("--aggregate is required (or set AGGR_NAME in env / --env-file)") sys.exit(1) - policy_name = f"{volume}_export_policy" - with OntapClient.from_env() as client: - # Step 1 ΓÇö create volume (idempotent: skip if already exists) - # POST /storage/volumes to create a new FlexVol with a NAS junction path. - # Volume creation is asynchronous ΓÇö the response contains a job UUID. - existing_vol = client.get( - "/storage/volumes", - fields="name,uuid", - name=volume, - **{"svm.name": svm}, - ) - if existing_vol.get("records"): - logger.info("Volume '%s' already exists ΓÇö skipping create", volume) - else: - logger.info("Creating volume '%s' (%s) on SVM '%s'ΓǪ", volume, size, svm) - create_resp = client.post( - "/storage/volumes", - body={ - "name": volume, - "svm": {"name": svm}, - "aggregates": [{"name": aggregate}], - "size": size, - "nas": {"path": f"/{volume}"}, - }, - ) - - # Step 2 ΓÇö poll volume-creation job - # Block until the async job finishes before proceeding. - # poll_job raises RuntimeError if the job ends in a failure state. 
- job_uuid = create_resp["job"]["uuid"] - logger.info("Volume creation job: %s", job_uuid) - client.poll_job(job_uuid) - logger.info("Volume '%s' created successfully", volume) - - # Step 3 ΓÇö fetch volume UUID - # The UUID is required to PATCH the volume later when assigning the export policy. - # Filter by name + svm.name to pinpoint exactly the volume just created. - vol_resp = client.get( - "/storage/volumes", - fields="name,uuid", - name=volume, - **{"svm.name": svm}, - ) - if not vol_resp.get("records"): - raise RuntimeError(f"Volume '{volume}' not found on SVM '{svm}' after creation") - volume_uuid = vol_resp["records"][0]["uuid"] - - # Step 4 ΓÇö create export policy (idempotent: skip if already exists) - # Creates a dedicated policy named _export_policy scoped to the SVM. - # A per-volume policy makes it easy to manage access rules independently. - existing_policy = client.get( - "/protocols/nfs/export-policies", - fields="name,id", - name=policy_name, - **{"svm.name": svm}, - ) - if existing_policy.get("records"): - logger.info("Export policy '%s' already exists ΓÇö skipping create", policy_name) - else: - logger.info("Creating export policy '%s'ΓǪ", policy_name) - client.post( - "/protocols/nfs/export-policies", - body={"name": policy_name, "svm": {"name": svm}}, - ) - - # Step 5 ΓÇö fetch export policy ID - # The numeric ID is required when POSTing rules to the policy. - # Filter by name + svm.name to retrieve only this policy's record. - policy_resp = client.get( - "/protocols/nfs/export-policies", - fields="name,id", - name=policy_name, - **{"svm.name": svm}, - ) - if not policy_resp.get("records"): - raise RuntimeError( - f"Export policy '{policy_name}' not found on SVM '{svm}' after creation" - ) - policy_id = policy_resp["records"][0]["id"] - - # Step 6 ΓÇö add client rule (idempotent: skip if a matching rule already exists) - # POST a rule to the export policy allowing the given client IP or CIDR range. - # ro_rule, rw_rule, superuser = 'any' is suitable for lab; tighten for production. - existing_rules = client.get( - f"/protocols/nfs/export-policies/{policy_id}/rules", - fields="index,clients", - ) - rule_exists = any( - any(c.get("match") == client_match for c in r.get("clients", [])) - for r in existing_rules.get("records", []) - ) - if rule_exists: - logger.info("Client rule '%s' already exists in policy ΓÇö skipping", client_match) - else: - logger.info("Adding client rule '%s' to policyΓǪ", client_match) - client.post( - f"/protocols/nfs/export-policies/{policy_id}/rules", - body={ - "clients": [{"match": client_match}], - "ro_rule": ["any"], - "rw_rule": ["any"], - "superuser": ["any"], - }, - ) - - # Step 7 ΓÇö assign export policy to volume - # PATCH the volume's nas.export_policy field to link the policy. - # This makes the volume accessible to NFS clients that match the rule. - logger.info("Assigning export policy to volumeΓǪ") - patch_resp = client.patch( - f"/storage/volumes/{volume_uuid}", - body={"nas": {"export_policy": {"name": policy_name}}}, - ) - - # Step 8 ΓÇö poll assign-policy job - # The PATCH may return a job UUID if the operation is async. - # Only poll if a UUID was returned; sync responses skip this block. - if "job" in patch_resp: - client.poll_job(patch_resp["job"]["uuid"]) - - # Step 9 ΓÇö print summary - # Log a single success line with volume, size, SVM, mount path, - # export policy name, and client rule for quick confirmation. 
+ volume_uuid = _ensure_volume(client, svm, volume, size, aggregate) + policy_id = _ensure_export_policy(client, svm, policy_name) + _ensure_client_rule(client, policy_id, client_match) + _assign_policy(client, volume_uuid, policy_name) logger.info( - "Γ£ô Volume '%s' (%s) created on SVM '%s' | Mount path: /%s | " - "Export policy '%s' created with client rule '%s' and assigned to volume", + "Volume '%s' (%s) provisioned on SVM '%s' | Mount path: /%s | " + "Export policy '%s' with client rule '%s' assigned", volume, size, svm, diff --git a/python/snapmirror_cleanup_test_failover.py b/python/snapmirror_cleanup_test_failover.py index ebdc61d..55f5ff2 100644 --- a/python/snapmirror_cleanup_test_failover.py +++ b/python/snapmirror_cleanup_test_failover.py @@ -64,7 +64,6 @@ def _env(key: str, default: str = "") -> str: - # Prefer value from INPUTS dict; fall back to environment variable. val = INPUTS.get(key) or os.environ.get(key, default) if not val: logger.error( @@ -118,6 +117,126 @@ def _pick_cluster_by_relationship( sys.exit(1) +def _find_tagged_clone(client: OntapClient, rel_uuid: str) -> dict | None: + """Return a dict with uuid/name/svm for the clone tagged ':test', or None.""" + resp = client.get( + "/storage/volumes", + fields="name,uuid,svm.name,state,nas.path", + **{"_tags": f"{rel_uuid}:test", "max_records": "1"}, + ) + if resp.get("num_records", 0) == 0: + return None + rec = resp["records"][0] + return { + "uuid": rec.get("uuid", ""), + "name": rec.get("name", ""), + "svm": rec.get("svm", {}).get("name", ""), + } + + +def _remove_smas_and_bring_online( + client: OntapClient, clone_uuid: str, clone_svm: str, clone_name: str +) -> None: + """Delete any SMAS relationship on the clone, then ensure the volume is online.""" + logger.info("=== Phase B: Remove SMAS relationship on clone (if any) ===") + smas_resp = client.get( + "/snapmirror/relationships", + fields="uuid,state", + **{ + "destination.path": f"{clone_svm}:{clone_name}", + "max_records": "10", + }, + ) + for smas_rel in smas_resp.get("records", []): + smas_uuid = smas_rel.get("uuid", "") + logger.info(" Deleting SMAS relationship %s on clone", smas_uuid) + try: + resp = client.delete( + f"/snapmirror/relationships/{smas_uuid}?return_timeout=120&force=true" + ) + job_uuid = resp.get("job", {}).get("uuid") + if job_uuid: + _poll_job(client, job_uuid) + except Exception as exc: + logger.warning("delete_smas_rel %s — %s (continuing)", smas_uuid, exc) + if smas_resp.get("num_records", 0) == 0: + logger.info(" No SMAS relationships found on clone — continuing") + try: + resp = client.patch( + f"/storage/volumes/{clone_uuid}?return_timeout=120", + body={"state": "online"}, + ) + job_uuid = resp.get("job", {}).get("uuid") + if job_uuid: + _poll_job(client, job_uuid) + except Exception as exc: + logger.warning("bring_online — %s (continuing)", exc) + + +def _unmount_clone(client: OntapClient, clone_uuid: str) -> None: + """Remove the NAS junction path; retries up to 6 times before aborting.""" + logger.info("=== Phase C: Unmount clone ===") + for attempt in range(1, 7): + try: + resp = client.patch( + f"/storage/volumes/{clone_uuid}?return_timeout=120", + body={"nas": {"path": ""}}, + ) + job_uuid = resp.get("job", {}).get("uuid") + if job_uuid: + _poll_job(client, job_uuid) + return + except Exception as exc: + logger.warning("unmount_clone attempt %d/6 — %s", attempt, exc) + if attempt < 6: + time.sleep(10) + logger.error("Failed to unmount clone after 6 attempts — aborting") + sys.exit(1) + + +def _offline_clone(client: 
OntapClient, clone_uuid: str) -> None: + """Set the volume state to offline (required before delete).""" + logger.info("=== Phase D: Offline clone ===") + try: + resp = client.patch( + f"/storage/volumes/{clone_uuid}?return_timeout=120", + body={"state": "offline"}, + ) + job_uuid = resp.get("job", {}).get("uuid") + if job_uuid: + _poll_job(client, job_uuid) + except Exception as exc: + logger.warning("offline_clone — %s", exc) + + +def _delete_and_confirm_clone( + client: OntapClient, clone_uuid: str, clone_name: str, dest_host: str +) -> None: + """Delete the clone volume and confirm it is gone.""" + logger.info("=== Phase E: Delete clone ===") + try: + resp = client.delete(f"/storage/volumes/{clone_uuid}?return_timeout=120") + job_uuid = resp.get("job", {}).get("uuid") + if job_uuid: + _poll_job(client, job_uuid) + except Exception as exc: + logger.warning("delete_clone — %s", exc) + confirm = client.get( + "/storage/volumes", + fields="name,uuid", + **{"uuid": clone_uuid, "max_records": "1"}, + ) + if confirm.get("num_records", 0) == 0: + logger.info( + "=== CLEANUP COMPLETE — clone '%s' deleted from cluster %s ===", + clone_name, + dest_host, + ) + else: + logger.error("Clone '%s' still exists after delete attempt", clone_name) + sys.exit(1) + + def main() -> None: cluster_a = _env("CLUSTER_A") cluster_b = _env("CLUSTER_B") @@ -126,10 +245,6 @@ def main() -> None: source_volume = _env("SOURCE_VOLUME") source_svm = _env("SOURCE_SVM") - # ── Phase 0: Find SM relationship on correct cluster ───────────────────── - # Search both clusters for the SnapMirror relationship matching the given - # source SVM and volume. Returns the cluster that owns the destination side - # so all subsequent API calls go to the correct cluster. logger.info("=== Phase 0: Find SnapMirror relationship ===") dest_host, rel = _pick_cluster_by_relationship( cluster_a, @@ -158,17 +273,9 @@ def main() -> None: ) with OntapClient(dest_host, dest_user, dest_pass, verify_ssl=False) as client: - # ── Phase A: Find tagged clone ──────────────────────────────────── - # Search for volumes tagged ':test' — only volumes created by - # snapmirror_test_failover.py carry this tag, preventing accidental - # deletion of manually created volumes with similar names. logger.info("=== Phase A: Find tagged clone ===") - tagged_resp = client.get( - "/storage/volumes", - fields="name,uuid,svm.name,state,nas.path", - **{"_tags": f"{rel_uuid}:test", "max_records": "1"}, - ) - if tagged_resp.get("num_records", 0) == 0: + clone = _find_tagged_clone(client, rel_uuid) + if clone is None: logger.info( "NO TAGGED CLONE FOUND for %s:%s on %s — nothing to clean up", source_svm, @@ -177,125 +284,17 @@ def main() -> None: ) return - clone = tagged_resp["records"][0] - clone_uuid = clone.get("uuid", "") - clone_name = clone.get("name", "") - clone_svm = clone.get("svm", {}).get("name", "") logger.info( "CLONE FOUND | name=%s | uuid=%s | svm=%s | cluster=%s", - clone_name, - clone_uuid, - clone_svm, + clone["name"], + clone["uuid"], + clone["svm"], dest_host, ) - - # ── Phase B: Delete SMAS relationship protecting the clone ──────────── - # The clone may be the destination of a SnapMirror Active Sync (SMAS) - # relationship. This holds an internal ONTAP job lock that prevents - # unmount and delete (errors 917536, 23003209). Delete the SMAS - # relationship first to release the lock entirely. 
- logger.info("=== Phase B: Remove SMAS relationship on clone (if any) ===") - smas_resp = client.get( - "/snapmirror/relationships", - fields="uuid,state", - **{ - "destination.path": f"{clone_svm}:{clone_name}", - "max_records": "10", - }, - ) - for smas_rel in smas_resp.get("records", []): - smas_uuid = smas_rel.get("uuid", "") - logger.info(" Deleting SMAS relationship %s on clone", smas_uuid) - try: - resp = client.delete( - f"/snapmirror/relationships/{smas_uuid}?return_timeout=120&force=true" - ) - job_uuid = resp.get("job", {}).get("uuid") - if job_uuid: - _poll_job(client, job_uuid) - except Exception as exc: - logger.warning("delete_smas_rel %s — %s (continuing)", smas_uuid, exc) - if smas_resp.get("num_records", 0) == 0: - logger.info(" No SMAS relationships found on clone — continuing") - - # Also bring online in case a previous failed run left it offline - try: - resp = client.patch( - f"/storage/volumes/{clone_uuid}?return_timeout=120", - body={"state": "online"}, - ) - job_uuid = resp.get("job", {}).get("uuid") - if job_uuid: - _poll_job(client, job_uuid) - except Exception as exc: - logger.warning("bring_online — %s (continuing)", exc) - - # ── Phase C: Unmount clone ────────────────────────────────────────── - # Remove the NAS junction path to unmount. Retry up to 6 times with a - # 10-second delay to let ONTAP fully release background locks. - logger.info("=== Phase C: Unmount clone ===") - unmounted = False - for attempt in range(1, 7): - try: - resp = client.patch( - f"/storage/volumes/{clone_uuid}?return_timeout=120", - body={"nas": {"path": ""}}, - ) - job_uuid = resp.get("job", {}).get("uuid") - if job_uuid: - _poll_job(client, job_uuid) - unmounted = True - break - except Exception as exc: - logger.warning("unmount_clone attempt %d/6 — %s", attempt, exc) - if attempt < 6: - time.sleep(10) - if not unmounted: - logger.error("Failed to unmount clone after 6 attempts — aborting") - sys.exit(1) - - # ── Phase D: Offline clone ─────────────────────────────────────────── - # Set the volume state to offline. A volume must be offline before it - # can be deleted in ONTAP. - logger.info("=== Phase D: Offline clone ===") - try: - resp = client.patch( - f"/storage/volumes/{clone_uuid}?return_timeout=120", - body={"state": "offline"}, - ) - job_uuid = resp.get("job", {}).get("uuid") - if job_uuid: - _poll_job(client, job_uuid) - except Exception as exc: - logger.warning("offline_clone — %s", exc) - - # ── Phase E: Delete clone ───────────────────────────────────────── - # DELETE the volume then verify removal by querying by UUID. - # Idempotent: if the volume is already gone, log success and exit cleanly. 
- logger.info("=== Phase E: Delete clone ===") - try: - resp = client.delete(f"/storage/volumes/{clone_uuid}?return_timeout=120") - job_uuid = resp.get("job", {}).get("uuid") - if job_uuid: - _poll_job(client, job_uuid) - except Exception as exc: - logger.warning("delete_clone — %s", exc) - - # Confirm deletion (idempotent: not-found = success) - confirm = client.get( - "/storage/volumes", - fields="name,uuid", - **{"uuid": clone_uuid, "max_records": "1"}, - ) - if confirm.get("num_records", 0) == 0: - logger.info( - "=== CLEANUP COMPLETE — clone '%s' deleted from cluster %s ===", - clone_name, - dest_host, - ) - else: - logger.error("Clone '%s' still exists after delete attempt", clone_name) - sys.exit(1) + _remove_smas_and_bring_online(client, clone["uuid"], clone["svm"], clone["name"]) + _unmount_clone(client, clone["uuid"]) + _offline_clone(client, clone["uuid"]) + _delete_and_confirm_clone(client, clone["uuid"], clone["name"], dest_host) if __name__ == "__main__": diff --git a/python/snapmirror_provision_dest_managed.py b/python/snapmirror_provision_dest_managed.py index f7676cf..4fa4c96 100644 --- a/python/snapmirror_provision_dest_managed.py +++ b/python/snapmirror_provision_dest_managed.py @@ -5,8 +5,6 @@ """SnapMirror Provision — Destination-Managed view. -Equivalent to: orchestrio run yaml-workflows/workflows/snapmirror_provision_dest_managed.yaml - All SnapMirror API calls driven from the DESTINATION cluster. Source RW volume must already exist; dest DP volume is auto-created. @@ -81,7 +79,6 @@ def _env(key: str, default: str = "") -> str: - # Prefer value from INPUTS dict; fall back to environment variable. val = INPUTS.get(key) or os.environ.get(key, default) if not val: logger.error( @@ -194,40 +191,31 @@ def _subnet24(ip: str) -> str: ) -def _setup_cluster_peer( - src: OntapClient, dst: OntapClient, source_svm: str, dest_svm: str -) -> tuple[str, str, str]: - """Ensure cluster peer exists between src and dst. - - Returns (src_peer_name, dst_peer_name, dst_peer_uuid). - Aborts with a clear error if intercluster LIFs are missing. 
- """ +def _fetch_created_peer_names(src: OntapClient, dst: OntapClient) -> tuple[str, str, str]: + """Return (src_peer_name, dst_peer_name, dst_peer_uuid) from current cluster peer records.""" _OK = {"available", "partial", "pending"} dst_cp = dst.get("/cluster/peers", fields="name,uuid,status.state", **{"max_records": "10"}) dst_peers = [p for p in dst_cp.get("records", []) if p.get("status", {}).get("state") in _OK] - if dst_peers: - p = dst_peers[0] - src_cp = src.get( - "/cluster/peers", fields="name,uuid,status.state", **{"max_records": "10"} - ) - src_peers = [ - q for q in src_cp.get("records", []) if q.get("status", {}).get("state") in _OK - ] - src_peer_name = src_peers[0]["name"] if src_peers else "" - src_ips = _get_ic_lif_ips(src) - dst_ips = _get_ic_lif_ips(dst) - logger.info( - "CLUSTER PEER | already peered — dst sees src as '%s' (state=%s) — skipping", - p["name"], - p.get("status", {}).get("state"), - ) - logger.info("IC LIFs | src=%s dst=%s", src_ips, dst_ips) - _check_ic_lif_preconditions(src, dst, src_ips, dst_ips) - return src_peer_name, p["name"], p["uuid"] + dst_peer = dst_peers[0] if dst_peers else {} + src_cp = src.get("/cluster/peers", fields="name,uuid,status.state", **{"max_records": "10"}) + src_peers = [p for p in src_cp.get("records", []) if p.get("status", {}).get("state") in _OK] + src_peer = src_peers[0] if src_peers else {} + logger.info("CLUSTER PEER | dst sees src as '%s'", dst_peer.get("name", "")) + return src_peer.get("name", ""), dst_peer.get("name", ""), dst_peer.get("uuid", "") - logger.info("CLUSTER PEER | no existing peer found — auto-creating") - src_ips = _get_ic_lif_ips(src) - dst_ips = _get_ic_lif_ips(dst) + +def _create_new_cluster_peer( + src: OntapClient, + dst: OntapClient, + src_ips: list[str], + dst_ips: list[str], + source_svm: str, + dest_svm: str, +) -> tuple[str, str, str]: + """POST a new cluster peer on both sides; return (src_name, dst_name, dst_uuid). + + Aborts if either cluster has no intercluster LIFs. + """ if not src_ips: logger.error( "ABORTED — no intercluster LIFs found on source cluster.\n" @@ -244,8 +232,6 @@ def _setup_cluster_peer( " -home-node -home-port e0d -address -netmask " ) sys.exit(1) - logger.info("CLUSTER PEER | src IC LIFs=%s dst IC LIFs=%s", src_ips, dst_ips) - _check_ic_lif_preconditions(src, dst, src_ips, dst_ips) try: resp = src.post( @@ -278,48 +264,50 @@ def _setup_cluster_peer( raise time.sleep(5) - dst_cp2 = dst.get("/cluster/peers", fields="name,uuid,status.state", **{"max_records": "10"}) - dst_peers2 = [p for p in dst_cp2.get("records", []) if p.get("status", {}).get("state") in _OK] - dst_peer_name = dst_peers2[0]["name"] if dst_peers2 else "" - dst_peer_uuid = dst_peers2[0]["uuid"] if dst_peers2 else "" - src_cp2 = src.get("/cluster/peers", fields="name,uuid,status.state", **{"max_records": "10"}) - src_peers2 = [p for p in src_cp2.get("records", []) if p.get("status", {}).get("state") in _OK] - src_peer_name = src_peers2[0]["name"] if src_peers2 else "" - logger.info("CLUSTER PEER | dst sees src as '%s'", dst_peer_name) - return src_peer_name, dst_peer_name, dst_peer_uuid + return _fetch_created_peer_names(src, dst) -def _setup_svm_peer( - src: OntapClient, - dst: OntapClient, - source_svm: str, - dest_svm: str, - src_peer_name: str, - dst_peer_name: str, - src_cluster_peer_uuid: str, -) -> str: - """Ensure SVM peer exists between dest_svm and source_svm. 
+def _setup_cluster_peer( + src: OntapClient, dst: OntapClient, source_svm: str, dest_svm: str +) -> tuple[str, str, str]: + """Ensure cluster peer exists between src and dst. - Returns the source SVM alias used in SnapMirror source paths. + Returns (src_peer_name, dst_peer_name, dst_peer_uuid). + Aborts with a clear error if intercluster LIFs are missing. """ - svm_resp = dst.get("/svm/peers", fields="uuid,name,state,peer", **{"svm.name": dest_svm}) - existing = [ - p - for p in svm_resp.get("records", []) - if p.get("state") in ("peered", "initiated") - and p.get("peer", {}).get("cluster", {}).get("uuid") == src_cluster_peer_uuid - ] - if existing: - alias = existing[0].get("peer", {}).get("svm", {}).get("name", source_svm) + _OK = {"available", "partial", "pending"} + dst_cp = dst.get("/cluster/peers", fields="name,uuid,status.state", **{"max_records": "10"}) + dst_peers = [p for p in dst_cp.get("records", []) if p.get("status", {}).get("state") in _OK] + if dst_peers: + p = dst_peers[0] + src_cp = src.get( + "/cluster/peers", fields="name,uuid,status.state", **{"max_records": "10"} + ) + src_peers = [ + q for q in src_cp.get("records", []) if q.get("status", {}).get("state") in _OK + ] + src_peer_name = src_peers[0]["name"] if src_peers else "" + src_ips = _get_ic_lif_ips(src) + dst_ips = _get_ic_lif_ips(dst) logger.info( - "SVM PEER | already peered '%s' <-> '%s' (alias='%s', state=%s) — skipping", - dest_svm, - source_svm, - alias, - existing[0].get("state"), + "CLUSTER PEER | already peered — dst sees src as '%s' (state=%s) — skipping", + p["name"], + p.get("status", {}).get("state"), ) - return alias + logger.info("IC LIFs | src=%s dst=%s", src_ips, dst_ips) + _check_ic_lif_preconditions(src, dst, src_ips, dst_ips) + return src_peer_name, p["name"], p["uuid"] + logger.info("CLUSTER PEER | no existing peer found — auto-creating") + src_ips = _get_ic_lif_ips(src) + dst_ips = _get_ic_lif_ips(dst) + logger.info("CLUSTER PEER | src IC LIFs=%s dst IC LIFs=%s", src_ips, dst_ips) + _check_ic_lif_preconditions(src, dst, src_ips, dst_ips) + return _create_new_cluster_peer(src, dst, src_ips, dst_ips, source_svm, dest_svm) + + +def _grant_svm_peer_permission(src: OntapClient, source_svm: str, src_peer_name: str) -> None: + """Grant SnapMirror peer-permission on the source SVM (idempotent).""" try: src.post( "/svm/peer-permissions", @@ -338,6 +326,11 @@ def _setup_svm_peer( logger.error("SVM PEER | peer-permission failed: %s", exc) raise + +def _create_svm_peer_relationship( + dst: OntapClient, dest_svm: str, source_svm: str, dst_peer_name: str +) -> None: + """Create the SVM peer relationship on the destination (idempotent).""" try: resp = dst.post( "/svm/peers", @@ -359,6 +352,41 @@ def _setup_svm_peer( logger.error("SVM PEER | create failed: %s", exc) raise + +def _setup_svm_peer( + src: OntapClient, + dst: OntapClient, + source_svm: str, + dest_svm: str, + src_peer_name: str, + dst_peer_name: str, + src_cluster_peer_uuid: str, +) -> str: + """Ensure SVM peer exists between dest_svm and source_svm. + + Returns the source SVM alias used in SnapMirror source paths. 
+ """ + svm_resp = dst.get("/svm/peers", fields="uuid,name,state,peer", **{"svm.name": dest_svm}) + existing = [ + p + for p in svm_resp.get("records", []) + if p.get("state") in ("peered", "initiated") + and p.get("peer", {}).get("cluster", {}).get("uuid") == src_cluster_peer_uuid + ] + if existing: + alias = existing[0].get("peer", {}).get("svm", {}).get("name", source_svm) + logger.info( + "SVM PEER | already peered '%s' <-> '%s' (alias='%s', state=%s) — skipping", + dest_svm, + source_svm, + alias, + existing[0].get("state"), + ) + return alias + + _grant_svm_peer_permission(src, source_svm, src_peer_name) + _create_svm_peer_relationship(dst, dest_svm, source_svm, dst_peer_name) + svm_resp2 = dst.get("/svm/peers", fields="uuid,name,state,peer", **{"svm.name": dest_svm}) peers2 = [ p @@ -371,6 +399,134 @@ def _setup_svm_peer( return alias +def _phase_a_source_preflight( + src: OntapClient, source_svm: str, source_volume: str, source_host: str +) -> dict: + """Verify source cluster connectivity and validate the source volume. + + Returns the source volume record. Aborts if missing or DP type. + """ + src_cluster = src.get("/cluster", fields="name,version") + logger.info( + "SOURCE CLUSTER | name=%s | ontap=%s", + src_cluster.get("name"), + src_cluster.get("version", {}).get("full"), + ) + src_vol_resp = src.get( + "/storage/volumes", + fields="name,uuid,state,type,space.size", + **{"max_records": "1", "name": source_volume, "svm.name": source_svm}, + ) + if src_vol_resp.get("num_records", 0) == 0: + logger.error( + "ABORTED — source volume '%s' not found on %s", + source_volume, + source_host, + ) + sys.exit(1) + src_vol = src_vol_resp["records"][0] + if src_vol.get("type") == "dp": + logger.error("ABORTED — source volume is type=dp; specify the RW volume") + sys.exit(1) + logger.info( + "SOURCE VOLUME | svm=%s | name=%s | uuid=%s | state=%s | type=%s | size=%s", + source_svm, + src_vol["name"], + src_vol["uuid"], + src_vol["state"], + src_vol["type"], + src_vol.get("space", {}).get("size"), + ) + return src_vol + + +def _phase_d_setup_relationship( + src: OntapClient, + dst: OntapClient, + dest_svm: str, + dest_volume: str, + source_svm_alias: str, + source_volume: str, + peer_name: str, + sm_policy: str, +) -> str: + """Create and initialize the SnapMirror relationship; return its UUID.""" + existing = dst.get( + "/snapmirror/relationships", + fields="uuid,state,healthy", + **{"destination.path": f"{dest_svm}:{dest_volume}", "max_records": "1"}, + ) + logger.info("RELATIONSHIP CHECK | existing=%d", existing.get("num_records", 0)) + + try: + create_resp = dst.post( + "/snapmirror/relationships?return_timeout=120", + body={ + "source": { + "path": f"{source_svm_alias}:{source_volume}", + "cluster": {"name": peer_name}, + }, + "destination": {"path": f"{dest_svm}:{dest_volume}"}, + "policy": {"name": sm_policy}, + }, + ) + job_uuid = create_resp.get("job", {}).get("uuid") + if job_uuid: + _poll_job(dst, job_uuid) + except Exception as exc: + logger.info("create_and_initialize_relationship — %s (may already exist)", exc) + + rel_resp = dst.get( + "/snapmirror/relationships", + fields="uuid,source.path,destination.path,state,lag_time,healthy,policy.name", + **{"destination.path": f"{dest_svm}:{dest_volume}", "max_records": "1"}, + ) + rel_records = rel_resp.get("records", []) + if not rel_records: + logger.error( + "ABORTED — SnapMirror relationship not found for '%s:%s'", dest_svm, dest_volume + ) + sys.exit(1) + rel = rel_records[0] + rel_uuid = rel.get("uuid", "") + logger.info( + 
"RELATIONSHIP | uuid=%s | state=%s | healthy=%s | policy=%s", + rel_uuid, + rel.get("state"), + rel.get("healthy"), + rel.get("policy", {}).get("name"), + ) + + try: + dst.post( + f"/snapmirror/relationships/{rel_uuid}/transfers?return_timeout=120", + body={}, + ) + except Exception as exc: + exc_s = str(exc) + if "13303812" in exc_s: + src_ips = _get_ic_lif_ips(src) + dst_ips = _get_ic_lif_ips(dst) + logger.error( + "ABORTED — SnapMirror initialize failed: intercluster LIF connectivity issue.\n" + " Error : %s\n" + " src IC : %s\n" + " dst IC : %s\n" + " Cause : TCP ports 11104/11105 are likely blocked between these IPs.\n" + " Fix : Ask your lab admin to open TCP 11104 and 11105 between\n" + " %s <-> %s", + exc_s, + src_ips, + dst_ips, + src_ips[0] if src_ips else "", + dst_ips[0] if dst_ips else "", + ) + sys.exit(1) + logger.info("initialize_relationship — %s (may already be initialized)", exc) + + return rel_uuid + + def main() -> None: source_host = _env("SOURCE_HOST") source_user = _env("SOURCE_USER") @@ -390,47 +546,9 @@ def main() -> None: dst = OntapClient(dest_host, dest_user, dest_pass, verify_ssl=False) with src, dst: - # ── Phase A: Source pre-flight ──────────────────────────────────── - # Verify source cluster is reachable and the source volume is a - # writable (RW) type — DP volumes cannot be used as a SnapMirror source. logger.info("=== Phase A: Source pre-flight ===") - src_cluster = src.get("/cluster", fields="name,version") - logger.info( - "SOURCE CLUSTER | name=%s | ontap=%s", - src_cluster.get("name"), - src_cluster.get("version", {}).get("full"), - ) - - src_vol_resp = src.get( - "/storage/volumes", - fields="name,uuid,state,type,space.size", - **{"max_records": "1", "name": source_volume, "svm.name": source_svm}, - ) - if src_vol_resp.get("num_records", 0) == 0: - logger.error( - "ABORTED — source volume '%s' not found on %s", - source_volume, - source_host, - ) - sys.exit(1) - src_vol = src_vol_resp["records"][0] - if src_vol.get("type") == "dp": - logger.error("ABORTED — source volume is type=dp; specify the RW volume") - sys.exit(1) - logger.info( - "SOURCE VOLUME | svm=%s | name=%s | uuid=%s | state=%s | type=%s | size=%s", - source_svm, - src_vol["name"], - src_vol["uuid"], - src_vol["state"], - src_vol["type"], - src_vol.get("space", {}).get("size"), - ) + src_vol = _phase_a_source_preflight(src, source_svm, source_volume, source_host) - # ── Phase B: Dest pre-flight ────────────────────────────────────── - # Verify destination cluster connectivity, get the cluster peer name - # (required to reference the source from the destination side), and - # pick an aggregate to host the new destination DP volume. 
logger.info("=== Phase B: Dest pre-flight ===") dst_cluster = dst.get("/cluster", fields="name,version") logger.info( @@ -439,7 +557,6 @@ def main() -> None: dst_cluster.get("version", {}).get("full"), ) - # ── Phase B0: Cluster peer setup ────────────────────────────────── logger.info("=== Phase B0: Cluster peer setup ===") src_peer_name, peer_name, dst_peer_uuid = _setup_cluster_peer( src, dst, source_svm, dest_svm @@ -453,15 +570,11 @@ def main() -> None: aggr_name = aggr_resp.get("records", [{}])[0].get("name", "") logger.info("DEST AGGREGATE | name=%s", aggr_name) - # ── Phase B1: SVM peer setup ────────────────────────────────────── logger.info("=== Phase B1: SVM peer setup ===") source_svm_alias = _setup_svm_peer( src, dst, source_svm, dest_svm, src_peer_name, peer_name, dst_peer_uuid ) - # ── Phase C: Dest volume setup ──────────────────────────────────── - # Auto-create a DP volume on the destination to receive SnapMirror - # transfers. The create is skipped silently if the volume already exists. logger.info("=== Phase C: Dest volume setup ===") try: dst.post( @@ -497,91 +610,14 @@ def main() -> None: dst_vol.get("type"), ) - # ── Phase D: Relationship setup ─────────────────────────────────── - # Create and initialize the SnapMirror relationship from destination. - # If the relationship already exists the POST fails gracefully. - # After create, the relationship UUID is fetched and a baseline - # transfer is triggered explicitly to start data replication. logger.info("=== Phase D: Relationship setup ===") - existing = dst.get( - "/snapmirror/relationships", - fields="uuid,state,healthy", - **{"destination.path": f"{dest_svm}:{dest_volume}", "max_records": "1"}, - ) - logger.info("RELATIONSHIP CHECK | existing=%d", existing.get("num_records", 0)) - - try: - create_resp = dst.post( - "/snapmirror/relationships?return_timeout=120", - body={ - "source": { - "path": f"{source_svm_alias}:{source_volume}", - "cluster": {"name": peer_name}, - }, - "destination": {"path": f"{dest_svm}:{dest_volume}"}, - "policy": {"name": sm_policy}, - }, - ) - job_uuid = create_resp.get("job", {}).get("uuid") - if job_uuid: - _poll_job(dst, job_uuid) - except Exception as exc: - logger.info("create_and_initialize_relationship — %s (may already exist)", exc) - - rel_resp = dst.get( - "/snapmirror/relationships", - fields="uuid,source.path,destination.path,state,lag_time,healthy,policy.name", - **{"destination.path": f"{dest_svm}:{dest_volume}", "max_records": "1"}, - ) - rel_records = rel_resp.get("records", []) - if not rel_records: - logger.error( - "ABORTED — SnapMirror relationship not found for '%s:%s'", dest_svm, dest_volume - ) - sys.exit(1) - rel = rel_records[0] - rel_uuid = rel.get("uuid", "") - logger.info( - "RELATIONSHIP | uuid=%s | state=%s | healthy=%s | policy=%s", - rel_uuid, - rel.get("state"), - rel.get("healthy"), - rel.get("policy", {}).get("name"), + rel_uuid = _phase_d_setup_relationship( + src, dst, dest_svm, dest_volume, source_svm_alias, source_volume, peer_name, sm_policy ) - try: - dst.post( - f"/snapmirror/relationships/{rel_uuid}/transfers?return_timeout=120", - body={}, - ) - except Exception as exc: - exc_s = str(exc) - if "13303812" in exc_s: - src_ips = _get_ic_lif_ips(src) - dst_ips = _get_ic_lif_ips(dst) - logger.error( - "ABORTED — SnapMirror initialize failed: intercluster LIF connectivity issue.\n" - " Error : %s\n" - " src IC : %s\n" - " dst IC : %s\n" - " Cause : TCP ports 11104/11105 are likely blocked between these IPs.\n" - " Fix : Ask your lab admin to open TCP 
11104 and 11105 between\n" - " %s <-> %s", - exc_s, - src_ips, - dst_ips, - src_ips[0] if src_ips else "", - dst_ips[0] if dst_ips else "", - ) - sys.exit(1) - logger.info("initialize_relationship — %s (may already be initialized)", exc) - - # ── Phase E: Convergence polling ────────────────────────────────── - # Poll the relationship until state=snapmirrored (baseline transfer done). logger.info("=== Phase E: Convergence polling ===") _wait_snapmirrored(dst, rel_uuid) - # ── Phase F: Final validation ───────────────────────────────────── logger.info("=== Phase F: Final validation ===") final = dst.get( f"/snapmirror/relationships/{rel_uuid}", @@ -609,6 +645,8 @@ def main() -> None: if __name__ == "__main__": try: main() + except KeyboardInterrupt: + sys.exit(130) except Exception: logger.exception("snapmirror_provision_dest_managed failed") sys.exit(1) diff --git a/python/snapmirror_provision_src_managed.py b/python/snapmirror_provision_src_managed.py index 9a15a26..481d8fe 100644 --- a/python/snapmirror_provision_src_managed.py +++ b/python/snapmirror_provision_src_managed.py @@ -71,7 +71,6 @@ def _env(key: str, default: str = "") -> str: - # Prefer value from INPUTS dict; fall back to environment variable. val = INPUTS.get(key) or os.environ.get(key, default) if not val: logger.error( @@ -129,9 +128,6 @@ def main() -> None: dst = OntapClient(dest_host, dest_user, dest_pass, verify_ssl=False) with src, dst: - # ── Phase A: Source pre-flight ─────────────────────────────────────────── - # Verify source cluster is reachable and the specified volume is a - # writable (RW) type. DP volumes cannot be used as a SnapMirror source. logger.info("=== Phase A: Source pre-flight ===") src_cluster = src.get("/cluster", fields="name,version") logger.info( @@ -165,10 +161,6 @@ def main() -> None: src_vol.get("space", {}).get("size"), ) - # ── Phase B: Dest pre-flight ───────────────────────────────────── - # Verify destination cluster connectivity. Retrieve the cluster peer name - # (used to reference the source from the destination side) and pick an - # available aggregate to host the new destination DP volume. logger.info("=== Phase B: Dest pre-flight ===") dst_cluster = dst.get("/cluster", fields="name,version") logger.info( @@ -194,10 +186,6 @@ def main() -> None: aggr_name = aggr_resp.get("records", [{}])[0].get("name", "") logger.info("DEST AGGREGATE | name=%s", aggr_name) - # ── Phase C: Auto-create dest DP volume ────────────────────────── - # Check if the destination DP volume already exists; create it if not. - # DP (data-protection) type volumes are required as SnapMirror destinations. - # Volume creation is skipped with a warning if it already exists. logger.info("=== Phase C: Dest volume setup ===") check_dest = dst.get( "/storage/volumes", @@ -236,10 +224,6 @@ def main() -> None: dst_vol.get("type"), ) - # ── Phase D: Create + initialize relationship ───────────────────── - # Create the SnapMirror relationship and trigger a baseline transfer. - # All relationship API calls are made from the destination cluster - # (ONTAP requirement). POST is skipped gracefully if it already exists. 
logger.info("=== Phase D: Relationship setup ===") existing = dst.get( "/snapmirror/relationships", @@ -266,10 +250,6 @@ def main() -> None: except Exception as exc: logger.warning("create_and_initialize_relationship — %s (may already exist)", exc) - # ── Phase E: Convergence polling ───────────────────────────────── - # Fetch the relationship UUID, trigger a baseline transfer explicitly, - # then poll until state=snapmirrored confirming initial replication is done. - # Times out after 30 minutes. logger.info("=== Phase E: Convergence polling ===") rel_resp = dst.get( "/snapmirror/relationships", @@ -295,9 +275,6 @@ def main() -> None: _wait_snapmirrored(dst, rel_uuid) - # ── Phase F: Final validation ───────────────────────────────────── - # Fetch the final relationship state and print a human-readable summary - # with source, destination, health status, policy, and lag time. logger.info("=== Phase F: Final validation ===") final = dst.get( f"/snapmirror/relationships/{rel_uuid}", diff --git a/python/snapmirror_test_failover.py b/python/snapmirror_test_failover.py index 3165778..48b2aba 100644 --- a/python/snapmirror_test_failover.py +++ b/python/snapmirror_test_failover.py @@ -65,7 +65,6 @@ def _env(key: str, default: str = "") -> str: - # Prefer value from INPUTS dict; fall back to environment variable. val = INPUTS.get(key) or os.environ.get(key, default) if not val: logger.error( @@ -148,10 +147,6 @@ def main() -> None: dest_pass = _env("DEST_PASS") source_volume = INPUTS.get("SOURCE_VOLUME") or os.environ.get("SOURCE_VOLUME", "*") - # ── Phase 0: Pick cluster ───────────────────────────────────────────── - # Scan both clusters to find which one holds the target DP volume. - # AUTO mode (SOURCE_VOLUME=* or unset): picks the most recently created - # DP volume. TARGETED mode: finds _dest on either cluster. logger.info("=== Phase 0: Auto-detect target cluster ===") dest_host, dp_vol = _pick_cluster(cluster_a, cluster_b, dest_user, dest_pass, source_volume) dp_vol_name = dp_vol["name"] @@ -168,8 +163,7 @@ def main() -> None: ) with OntapClient(dest_host, dest_user, dest_pass, verify_ssl=False) as client: - # ── Phase A: Pre-flight ──────────────────────────────────────────── # Verify the destination cluster is reachable and retrieve the SnapMirror - # relationship details (source, state, health, lag time) for the DP volume. logger.info("=== Phase A: Pre-flight ===") + logger.info("=== Phase A: Pre-flight ===") cluster = client.get("/cluster", fields="name,version") logger.info( "DEST CLUSTER | name=%s | ontap=%s", @@ -194,9 +188,7 @@ def main() -> None: rel.get("lag_time"), ) - # ── Phase B: Get latest snapshot ───────────────────────────────── # Fetch the most recent SnapMirror snapshot on the DP volume. - # The FlexClone must be based on a SnapMirror snapshot to guarantee - # a consistent point-in-time copy of the replicated data. logger.info("=== Phase B: Get latest SnapMirror snapshot ===") + logger.info("=== Phase B: Get latest SnapMirror snapshot ===") snap_resp = client.get( f"/storage/volumes/{dp_vol_uuid}/snapshots", fields="name,create_time", @@ -215,9 +207,7 @@ def main() -> None: snap_resp["records"][0].get("create_time"), ) - # ── Phase C: Create FlexClone ───────────────────────────────────── # Create a writable FlexClone of the DP volume from the latest SnapMirror - # snapshot. The clone gets a NAS junction path so it can be mounted - # immediately on a test client without touching the source data. 
logger.info("=== Phase C: Create FlexClone ===") + logger.info("=== Phase C: Create FlexClone ===") clone_name = f"{dp_vol_name}_clone" try: clone_resp = client.post( @@ -239,10 +229,6 @@ def main() -> None: except Exception as exc: logger.warning("create_test_clone — %s (may already exist)", exc) - # ── Phase D: Verify clone + tag it ──────────────────────────────── - # Confirm the clone is online and retrieve its UUID and junction path. - # Tag it with the SM relationship UUID (':test') so the cleanup - # script can identify and delete only test clones, never other volumes. logger.info("=== Phase D: Verify clone + tag ===") clone_vol_resp = client.get( "/storage/volumes", @@ -284,10 +270,6 @@ def main() -> None: dp_svm_name, ) - # ── Phase E: Resync SnapMirror ────────────────────────────────────── - # Resume SnapMirror replication after the test clone was created. - # The test clone remains accessible while resync runs in the background. - # Polls until state=snapmirrored to confirm replication is healthy again. logger.info("=== Phase E: Resync SnapMirror ===") try: resync_resp = client.patch( From 740ea3a58eff2370e7f436a5d28dd4a16be3b44e Mon Sep 17 00:00:00 2001 From: Somanath Date: Tue, 12 May 2026 16:59:33 +0530 Subject: [PATCH 3/3] fix: remove duplicate --fail flag from TruffleHog extra_args --- .github/workflows/pr-guard.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-guard.yml b/.github/workflows/pr-guard.yml index 52a853e..eecf781 100644 --- a/.github/workflows/pr-guard.yml +++ b/.github/workflows/pr-guard.yml @@ -34,7 +34,7 @@ jobs: - name: TruffleHog scan uses: trufflesecurity/trufflehog@v3.94.3 with: - extra_args: --only-verified --fail + extra_args: --only-verified yaml-syntax: name: Validate YAML syntax