diff --git a/src/azure-cli/azure/cli/command_modules/appservice/_help.py b/src/azure-cli/azure/cli/command_modules/appservice/_help.py index 5b7b5b1da03..a15cc5c3a1b 100644 --- a/src/azure-cli/azure/cli/command_modules/appservice/_help.py +++ b/src/azure-cli/azure/cli/command_modules/appservice/_help.py @@ -2597,12 +2597,16 @@ helps['webapp sitecontainers convert'] = """ type: command -short-summary: Convert a webapp from sitecontainers to a classic custom container and vice versa. +short-summary: Convert a webapp from sitecontainers to a classic custom container and vice versa. Supports both single-container (DOCKER|) and multi-container (COMPOSE|) apps. examples: - name: Convert a webapp to classic custom container (docker) from sitecontainers text: az webapp sitecontainers convert --mode docker --name MyWebApp --resource-group MyResourceGroup - - name: Convert a webapp to sitecontainers from classic custom container (docker) + - name: Convert a single-container webapp (DOCKER|) to sitecontainers text: az webapp sitecontainers convert --mode sitecontainers --name MyWebApp --resource-group MyResourceGroup + - name: Convert a multi-container webapp (COMPOSE|) to sitecontainers + text: az webapp sitecontainers convert --mode sitecontainers --name MyWebApp --resource-group MyResourceGroup + - name: Convert a COMPOSE app to sitecontainers specifying which service is the main container + text: az webapp sitecontainers convert --mode sitecontainers --name MyWebApp --resource-group MyResourceGroup --main-container-name web """ diff --git a/src/azure-cli/azure/cli/command_modules/appservice/_params.py b/src/azure-cli/azure/cli/command_modules/appservice/_params.py index 1544d99df6c..bdc992a081f 100644 --- a/src/azure-cli/azure/cli/command_modules/appservice/_params.py +++ b/src/azure-cli/azure/cli/command_modules/appservice/_params.py @@ -383,6 +383,10 @@ def load_arguments(self, _): with self.argument_context("webapp sitecontainers convert") as c: c.argument('mode', 
options_list=['--mode'], help='Mode for conversion.', arg_type=get_enum_type(['docker', 'sitecontainers'])) + c.argument('main_container_name', options_list=['--main-container-name'], + help='For COMPOSE to sitecontainers conversion, specifies which ' + 'compose service should be the main container. If not provided, ' + 'the service with a port mapping is auto-detected.') with self.argument_context('webapp show') as c: c.argument('name', arg_type=webapp_name_arg_type) diff --git a/src/azure-cli/azure/cli/command_modules/appservice/custom.py b/src/azure-cli/azure/cli/command_modules/appservice/custom.py index dc510fe4d4b..9091dd433a0 100644 --- a/src/azure-cli/azure/cli/command_modules/appservice/custom.py +++ b/src/azure-cli/azure/cli/command_modules/appservice/custom.py @@ -2125,18 +2125,45 @@ def get_webapp_sitecontainer_log(cmd, name, resource_group, container_name, slot raise AzureInternalError("Failed to fetch sitecontainer logs. Error: {}".format(str(ex))) -def convert_webapp_sitecontainers(cmd, name, resource_group, mode, slot=None): +def convert_webapp_sitecontainers(cmd, name, resource_group, mode, slot=None, main_container_name=None): """ - Convert a webapp between classic (docker) and sitecontainers mode. + Convert a webapp between classic (docker/compose) and sitecontainers mode. :param cmd: CLI command context :param name: Name of the webapp :param resource_group: Resource group of the webapp :param mode: Target mode, either 'docker' or 'sitecontainers' :param slot: Optional deployment slot + :param main_container_name: For compose conversion, the name of the service to be the main container """ + if not slot and mode == 'sitecontainers': + logger.warning("") + logger.warning("WARNING: You are about to convert the production site directly. 
" + "It is recommended to perform the conversion on a deployment slot first, " + "verify the result, and then swap the slot into production.") + logger.warning("") + logger.warning("If you proceed on production and need to roll back:") + logger.warning(" 1. Save your current config first:") + logger.warning(" az webapp config show -g %s -n %s --query linuxFxVersion -o tsv", + resource_group, name) + logger.warning(" 2. Delete all sitecontainers created by the conversion:") + logger.warning(" az webapp sitecontainers list -g %s -n %s " + "--query \"[].name\" -o tsv", resource_group, name) + logger.warning(" az webapp sitecontainers delete -g %s -n %s " + "--container-name ", resource_group, name) + logger.warning(" (Repeat for each sitecontainer)") + logger.warning(" 3. Review and delete any app settings prefixed with COMPOSE_") + logger.warning(" that were added during conversion.") + logger.warning(" 4. Restore the saved linuxFxVersion:") + logger.warning(" az webapp config set -g %s -n %s " + "--linux-fx-version \"\"", resource_group, name) + logger.warning("") + if not prompt_y_n("Do you want to continue with the conversion on the production site?"): + logger.warning("Conversion aborted. 
Use '--slot <slot-name>' to convert a deployment slot instead.") + return None + if mode == 'sitecontainers': - _convert_webapp_to_sitecontainers(cmd, name, resource_group, slot) + _convert_webapp_to_sitecontainers(cmd, name, resource_group, slot, main_container_name) elif mode == 'docker': _convert_webapp_to_docker(cmd, name, resource_group, slot) else: @@ -2441,14 +2468,27 @@ def _build_plan_default_identity_sdk(default_identity): } -def _convert_webapp_to_sitecontainers(cmd, name, resource_group, slot): +def _convert_webapp_to_sitecontainers(cmd, name, resource_group, slot, main_container_name=None): site_config = get_site_configs(cmd, resource_group, name, slot) linux_fx_version = getattr(site_config, "linux_fx_version", None) - if linux_fx_version and not linux_fx_version.startswith('DOCKER|'): - raise ValidationError("Cannot convert to sitecontainers mode as site is not a " - "classic custom container (docker) app.") + is_compose = linux_fx_version and linux_fx_version.startswith('COMPOSE|') + is_docker = linux_fx_version and linux_fx_version.startswith('DOCKER|') + + if not is_compose and not is_docker: + raise ValidationError("Cannot convert to sitecontainers mode. The site must be a " + "classic custom container (DOCKER|) or multi-container (COMPOSE|) app. 
" + "Current linuxFxVersion: '{}'".format(linux_fx_version or '(empty)')) + + if is_compose: + _convert_compose_to_sitecontainers(cmd, name, resource_group, slot, site_config, + linux_fx_version, main_container_name) + else: + _convert_docker_to_sitecontainers(cmd, name, resource_group, slot, site_config, linux_fx_version) + +def _convert_docker_to_sitecontainers(cmd, name, resource_group, slot, site_config, linux_fx_version): + """Convert a single-container DOCKER| app to sitecontainers mode.""" acr_use_managed_identity_creds = getattr(site_config, "acr_use_managed_identity_creds", None) acr_user_managed_identity_id = getattr(site_config, "acr_user_managed_identity_id", None) acr_user_name = None @@ -2460,7 +2500,7 @@ def _convert_webapp_to_sitecontainers(cmd, name, resource_group, slot): slot_segment = f"/slots/{slot}" if slot else "" url = ( f"/subscriptions/{subscription_id}/resourceGroups/{resource_group}/" - f"providers/Microsoft.Web/sites/{name}{slot_segment}/config/appsettings/list?api-version=2023-12-01" + f"providers/Microsoft.Web/sites/{name}{slot_segment}/config/appsettings/list?api-version=2024-11-01" ) request_url = cmd.cli_ctx.cloud.endpoints.resource_manager + url response = send_raw_request(cmd.cli_ctx, "POST", request_url) @@ -2521,6 +2561,642 @@ def _convert_webapp_to_sitecontainers(cmd, name, resource_group, slot): logger.warning("Webapp '%s' converted to sitecontainers mode.", name) +# --------------------------------------------------------------------------- +# Compose → Sitecontainers conversion +# --------------------------------------------------------------------------- +# The following constants and functions parse a Docker Compose YAML (as stored +# in linuxFxVersion as COMPOSE|) and produce SiteContainer ARM objects. +# The parsing mirrors what the LWAS v1 ComposeFileParser.cs actually accepted, +# and the volume-mapping logic matches AppSpecConverter.cs in LWASv2. 
+# --------------------------------------------------------------------------- + +_COMPOSE_WEBAPP_STORAGE_HOME = "${WEBAPP_STORAGE_HOME}" +_COMPOSE_SIDECAR_HOME_MOUNT = "/home" + +# Compose fields that are recognized by the LWAS v1 orchestrator. Everything +# else is silently ignored there, but we warn the user so they know what will +# not carry over. +_COMPOSE_SUPPORTED_SERVICE_KEYS = frozenset([ + "image", "restart", "entrypoint", "command", "environment", "ports", "volumes", +]) + +# Top-level compose keys the old orchestrator recognized (even if it ignored +# some, like "networks"). +_COMPOSE_SUPPORTED_TOP_KEYS = frozenset([ + "version", "services", "networks", "volumes", +]) + +# Service-level keys that are NOT supported in Sidecars and merit a warning +_COMPOSE_UNSUPPORTED_KEYS = frozenset([ + "build", "depends_on", "links", "networks", "secrets", "deploy", + "healthcheck", "logging", "dns", "dns_search", "extra_hosts", + "cap_add", "cap_drop", "privileged", "read_only", "tmpfs", + "security_opt", "sysctls", "ulimits", "devices", "labels", + "stop_signal", "stop_grace_period", "working_dir", "domainname", + "hostname", "ipc", "pid", "shm_size", "stdin_open", "tty", "user", +]) + + +def _parse_compose_entrypoint_or_command(value): + """Parse a Compose entrypoint or command value (string or list) into a list of tokens. + + Mirrors ComposeFileParser.ParseEntryPoint / ParseCommand: + - Scalar string → split on whitespace + - Sequence → use items as-is + """ + if value is None: + return [] + if isinstance(value, list): + return [str(v) for v in value] + # Scalar string – split on whitespace (matching ComposeFileParser.TokenizeString) + return str(value).split() + + +def _merge_entrypoint_command(entrypoint_tokens, command_tokens): + """Merge entrypoint and command into a single startUpCommand string. + + In Docker/Compose semantics, ENTRYPOINT and CMD are separate concepts, but + the Sidecar model has a single ``startUpCommand`` field. 
We concatenate + them (entrypoint first, then command arguments) which is the effective + behaviour of ``docker run``. + + Returns None if both are empty so that the platform default is used. + """ + merged = entrypoint_tokens + command_tokens + if not merged: + return None + return " ".join(merged) + + +def _parse_compose_environment(env_node): + """Parse Compose environment into a dict of {NAME: VALUE}. + + Supports both formats that ComposeFileParser handles: + - Mapping: ``environment: { KEY: VALUE, ... }`` + - Sequence: ``environment: [ "KEY=VALUE", ... ]`` + """ + if env_node is None: + return {} + if isinstance(env_node, dict): + return {str(k): str(v) if v is not None else "" for k, v in env_node.items()} + if isinstance(env_node, list): + result = {} + for item in env_node: + item_str = str(item) + idx = item_str.find('=') + if idx > 0: + result[item_str[:idx]] = item_str[idx + 1:] + elif idx == 0: + logger.warning(" [env] Skipping environment entry with empty name: '%s'", item_str) + else: + # No '=' means the value comes from an existing app setting (name-only reference) + result[item_str] = "" + return result + logger.warning(" [env] Unexpected environment format (not dict or list). Skipping.") + return {} + + +def _parse_compose_ports(ports_node): + """Parse Compose ports into a list of (host_port, container_port) tuples. + + Only the ``host:container`` short syntax is parsed (matching + ComposeFileParser.ParsePorts). Returns a list of tuples. 
+ """ + if ports_node is None: + return [] + ports = [] + for item in ports_node: + mapping = str(item) + parts = mapping.split(':') + if len(parts) >= 2: + try: + host_port = int(parts[0]) + container_port = int(parts[1]) + ports.append((host_port, container_port)) + except ValueError: + logger.warning(" [ports] Skipping invalid port mapping: '%s'", mapping) + else: + # single port (no host mapping) – treat as container port + try: + container_port = int(parts[0]) + ports.append((None, container_port)) + except ValueError: + logger.warning(" [ports] Skipping invalid port value: '%s'", mapping) + return ports + + +def _parse_compose_volumes(volumes_node, top_level_volumes): + """Parse Compose service volumes into sidecar VolumeMount dicts. + + Handles both short syntax (``source:target``) and long syntax (mapping with + type/source/target) – mirroring ComposeFileParser.ParseContainerVolumes. + + For the Sidecar model, ``volumeSubPath`` must be an absolute path under + ``/home`` (which maps to ``${WEBAPP_STORAGE_HOME}`` from Compose). Named + volumes without a ``/home`` path are mapped to a local share path. + + Returns: + A list of dicts with keys: volume_sub_path, container_mount_path, + read_only. Also returns a list of warning strings for unsupported + volumes. + """ + mounts = [] + warnings = [] + if volumes_node is None: + return mounts, warnings + + for item in volumes_node: + if isinstance(item, dict): + # Long syntax: { type: bind|volume, source: ..., target: ... 
} + vol_type = item.get("type", "volume") + source = item.get("source", "") + target = item.get("target", "") + read_only = item.get("read_only", False) + + if not source: + warnings.append(f" [volumes] Skipping volume with empty source (target='{target}').") + continue + if not target: + warnings.append(f" [volumes] Skipping volume with empty target (source='{source}').") + continue + + if vol_type == "bind": + mount = _make_bind_mount(source, target, read_only, warnings) + if mount: + mounts.append(mount) + else: + # Named volume – resolve against top-level volumes + mount = _make_named_volume_mount(source, target, read_only, top_level_volumes, warnings) + if mount: + mounts.append(mount) + else: + # Short syntax: "source:target" or "source:target:ro" + parts = str(item).split(':') + if len(parts) >= 2: + source = parts[0] + target = parts[1] + read_only = len(parts) >= 3 and parts[2].strip().lower() == 'ro' + else: + warnings.append(f" [volumes] Skipping unrecognised volume entry: '{item}'") + continue + + if source.startswith(_COMPOSE_WEBAPP_STORAGE_HOME): + mount = _make_bind_mount(source, target, read_only, warnings) + if mount: + mounts.append(mount) + elif any(c in source for c in ('/', '\\', '$')): + warnings.append( + f" [volumes] UNSUPPORTED bind mount '{source}:{target}'. " + f"Only bind mounts starting with {_COMPOSE_WEBAPP_STORAGE_HOME} are supported." + ) + else: + # Named volume + mount = _make_named_volume_mount(source, target, read_only, top_level_volumes, warnings) + if mount: + mounts.append(mount) + + return mounts, warnings + + +def _make_bind_mount(source, target, read_only, warnings): + """Convert a ${WEBAPP_STORAGE_HOME}/... bind mount to a sidecar VolumeMount. + + In Compose, ``${WEBAPP_STORAGE_HOME}`` is the /home mount point. In the + Sidecar model, ``volumeSubPath`` is an absolute path under /home. + Example: ``${WEBAPP_STORAGE_HOME}/site/wwwroot`` → ``/home/site/wwwroot``. 
+ """ + if not source.startswith(_COMPOSE_WEBAPP_STORAGE_HOME): + warnings.append( + f" [volumes] UNSUPPORTED bind mount source '{source}'. " + f"Bind mounts must start with {_COMPOSE_WEBAPP_STORAGE_HOME}." + ) + return None + + # Strip the ${WEBAPP_STORAGE_HOME} prefix and map to /home/... + sub_path = source[len(_COMPOSE_WEBAPP_STORAGE_HOME):] + if not sub_path: + sub_path = "/" + elif not sub_path.startswith('/'): + sub_path = '/' + sub_path + + volume_sub_path = _COMPOSE_SIDECAR_HOME_MOUNT + sub_path if sub_path != '/' else _COMPOSE_SIDECAR_HOME_MOUNT + + return { + "volume_sub_path": volume_sub_path, + "container_mount_path": target, + "read_only": read_only, + } + + +def _make_named_volume_mount(vol_name, target, read_only, top_level_volumes, warnings): # pylint: disable=unused-argument + """Convert a named volume to a sidecar VolumeMount. + + Named volumes in Compose are typically Docker-managed volumes that are + local to the instance. In the Sidecar model these map to the local + ephemeral share (``CustomLocalShare`` in LWASv2) via a volumeSubPath + that does NOT start with ``/home``. We use ``/compose/volumes/`` + so the data stays on local (non-persistent) storage, which matches + Docker named volume semantics. If persistence is needed, users should + switch to a ``${WEBAPP_STORAGE_HOME}`` bind mount instead. + """ + warnings.append( + f" [volumes] Named volume '{vol_name}' mapped to '/compose/volumes/{vol_name}' → '{target}'. " + f"This uses LOCAL (ephemeral) storage, matching Docker named volume behaviour. " + f"Data will NOT survive a restart. If you need persistence, use a " + f"{_COMPOSE_WEBAPP_STORAGE_HOME} bind mount instead." + ) + return { + "volume_sub_path": f"/compose/volumes/{vol_name}", + "container_mount_path": target, + "read_only": read_only, + } + + +def _sanitize_container_name(service_name): + """Sanitize a Compose service name for use as a sitecontainer name. + + Sitecontainer names must be alphanumeric with hyphens, no underscores. 
+ """ + # Replace underscores/dots/spaces with hyphens, then strip non-alphanum-hyphen chars + sanitized = re.sub(r'[^a-zA-Z0-9-]', '-', service_name) + # Collapse consecutive hyphens + sanitized = re.sub(r'-+', '-', sanitized).strip('-') + return sanitized.lower() or "container" + + +def _convert_compose_to_sitecontainers(cmd, name, resource_group, slot, # pylint: disable=too-many-branches + site_config, linux_fx_version, main_container_name=None): + """Convert a COMPOSE| multi-container app to sitecontainers mode. + + Steps: + 1. Decode & parse the compose YAML from linuxFxVersion + 2. Extract services with image, entrypoint, command, environment, ports, volumes + 3. Determine authentication (shared ACR config for all services) + 4. Create app settings for inline environment variables + 5. Map volumes (${WEBAPP_STORAGE_HOME} → /home VolumeSubPath) + 6. Create sitecontainer resources via ARM + 7. Set linuxFxVersion to SITECONTAINERS + """ + import yaml + from base64 import b64decode + from azure.mgmt.web.models import VolumeMount, EnvironmentVariable + + # ----------------------------------------------------------------------- + # Step 1: Decode and parse compose YAML + # ----------------------------------------------------------------------- + compose_b64 = linux_fx_version.split('|', 1)[1] + try: + compose_yaml_str = b64decode(compose_b64.encode('utf-8')).decode('utf-8') + except Exception as ex: + raise ValidationError(f"Failed to base64-decode the COMPOSE configuration: {ex}") + + try: + compose = yaml.safe_load(compose_yaml_str) + except Exception as ex: + raise ValidationError(f"Failed to parse COMPOSE YAML: {ex}") + + if not isinstance(compose, dict) or 'services' not in compose: + raise ValidationError("Invalid Docker Compose file: missing 'services' section.") + + services = compose.get('services', {}) + if not services: + raise ValidationError("Docker Compose file has no services defined.") + + top_level_volumes = compose.get('volumes', {}) or {} + + # 
Warn about unrecognised top-level keys + for key in compose: + if key not in _COMPOSE_SUPPORTED_TOP_KEYS: + logger.warning("WARNING: Top-level Compose key '%s' is not supported and will be ignored.", key) + + # ----------------------------------------------------------------------- + # Step 2: Get shared ACR auth configuration + # ----------------------------------------------------------------------- + acr_use_managed_identity_creds = getattr(site_config, "acr_use_managed_identity_creds", None) + acr_user_managed_identity_id = getattr(site_config, "acr_user_managed_identity_id", None) + + from azure.cli.core.commands.client_factory import get_subscription_id + subscription_id = get_subscription_id(cmd.cli_ctx) + slot_segment = f"/slots/{slot}" if slot else "" + url = ( + f"/subscriptions/{subscription_id}/resourceGroups/{resource_group}/" + f"providers/Microsoft.Web/sites/{name}{slot_segment}/config/appsettings/list?api-version=2024-11-01" + ) + request_url = cmd.cli_ctx.cloud.endpoints.resource_manager + url + response = send_raw_request(cmd.cli_ctx, "POST", request_url) + app_settings_raw = response.json() + existing_app_settings = app_settings_raw.get("properties", {}) + + acr_user_password = existing_app_settings.get("DOCKER_REGISTRY_SERVER_PASSWORD", None) + acr_user_name = existing_app_settings.get("DOCKER_REGISTRY_SERVER_USERNAME", None) + + # ----------------------------------------------------------------------- + # Step 3: Parse each service + # ----------------------------------------------------------------------- + all_warnings = [] + new_app_settings = {} # Will be created as app settings for env var references + sitecontainer_specs = [] + service_names = list(services.keys()) + seen_ports = {} # port → service_name for conflict detection + services_with_ports = [] + + for svc_name in service_names: + svc = services[svc_name] + if not isinstance(svc, dict): + all_warnings.append(f"WARNING: Service '{svc_name}' is not a valid mapping. 
Skipping.") + continue + + container_name = _sanitize_container_name(svc_name) + logger.warning("Processing service '%s' (container name: '%s')...", svc_name, container_name) + + # Warn about unsupported keys + for key in svc: + if key in _COMPOSE_UNSUPPORTED_KEYS: + all_warnings.append( + f" [{svc_name}] WARNING: Key '{key}' is not supported in Sidecars and will be ignored." + ) + elif key not in _COMPOSE_SUPPORTED_SERVICE_KEYS: + all_warnings.append( + f" [{svc_name}] INFO: Unrecognised key '{key}' will be ignored." + ) + + # --- Image --- + image = svc.get('image') + if not image: + raise ValidationError( + f"Service '{svc_name}' does not have an 'image' specified. " + f"Sidecars require a pre-built image; 'build' is not supported." + ) + + # --- Entrypoint + Command → startUpCommand --- + entrypoint_tokens = _parse_compose_entrypoint_or_command(svc.get('entrypoint')) + command_tokens = _parse_compose_entrypoint_or_command(svc.get('command')) + startup_command = _merge_entrypoint_command(entrypoint_tokens, command_tokens) + if entrypoint_tokens and command_tokens: + all_warnings.append( + f" [{svc_name}] INFO: Both 'entrypoint' and 'command' were specified. " + f"They have been merged into a single startUpCommand: '{startup_command}'. " + f"Verify this behaves as expected." + ) + + # --- Ports --- + ports = _parse_compose_ports(svc.get('ports')) + target_port = None + if ports: + services_with_ports.append(svc_name) + # Use the container port of the first port mapping + _, container_port = ports[0] + target_port = str(container_port) + + if len(ports) > 1: + all_warnings.append( + f" [{svc_name}] WARNING: Multiple port mappings found ({[f'{h}:{c}' for h, c in ports]}). " + f"Only the first container port ({container_port}) will be used as targetPort. " + f"In Sidecars, all containers share the same network namespace (localhost), " + f"so each container must listen on a unique port." 
+ ) + + # Detect port conflicts + if target_port in seen_ports: + all_warnings.append( + f" [{svc_name}] CRITICAL: Port {target_port} conflicts with service " + f"'{seen_ports[target_port]}'. In Sidecars, all containers share the same " + f"network namespace. Each container MUST use a unique port." + ) + else: + seen_ports[target_port] = svc_name + + # Warn about host:container port differences + for host_port, cont_port in ports: + if host_port is not None and host_port != cont_port: + all_warnings.append( + f" [{svc_name}] WARNING: Host port ({host_port}) differs from container port " + f"({cont_port}). In Sidecars, all containers share a single network namespace, " + f"so the host:container port mapping is ignored. Only the container port is used." + ) + + # --- Environment Variables --- + env_dict = _parse_compose_environment(svc.get('environment')) + env_variables = [] + if env_dict: + all_warnings.append( + f" [{svc_name}] INFO: {len(env_dict)} environment variable(s) found. In the Sidecar model, " + f"environment variable 'value' is a REFERENCE to an App Setting name (not the literal value). " + f"App settings will be created/updated for each variable." + ) + for env_name, env_value in env_dict.items(): + # Create an app setting with a namespaced key to avoid collisions + # Convention: COMPOSE__ as the app setting name + app_setting_key = f"COMPOSE_{_sanitize_container_name(svc_name).upper().replace('-', '_')}_{env_name}" + if env_value: + new_app_settings[app_setting_key] = env_value + else: + # Value-less env var: check if there is an existing app setting with same name + if env_name in existing_app_settings: + app_setting_key = env_name # Reference the existing app setting directly + else: + new_app_settings[app_setting_key] = "" + all_warnings.append( + f" [{svc_name}] WARNING: Environment variable '{env_name}' has no value and " + f"no matching app setting exists. An empty app setting '{app_setting_key}' " + f"will be created." 
+ ) + env_variables.append(EnvironmentVariable(name=env_name, value=app_setting_key)) + + # --- Volumes --- + volume_mounts_raw, vol_warnings = _parse_compose_volumes(svc.get('volumes'), top_level_volumes) + all_warnings.extend(vol_warnings) + volume_mounts = [] + for vm in volume_mounts_raw: + volume_mounts.append(VolumeMount( + volume_sub_path=vm["volume_sub_path"], + container_mount_path=vm["container_mount_path"], + read_only=vm.get("read_only", False), + )) + + sitecontainer_specs.append({ + "service_name": svc_name, + "container_name": container_name, + "image": image, + "target_port": target_port, + "startup_command": startup_command, + "env_variables": env_variables or None, + "volume_mounts": volume_mounts or None, + }) + + if not sitecontainer_specs: + raise ValidationError("No valid services found in the Docker Compose file.") + + # Check for container name collisions after sanitization + seen_names = {} + for spec in sitecontainer_specs: + cname = spec["container_name"] + if cname in seen_names: + raise ValidationError( + f"Container name collision: services '{seen_names[cname]}' and '{spec['service_name']}' " + f"both sanitize to container name '{cname}'. Rename one of the services to avoid this." + ) + seen_names[cname] = spec["service_name"] + + # ----------------------------------------------------------------------- + # Step 4: Determine main container + # ----------------------------------------------------------------------- + main_svc_name = None + if main_container_name: + # User explicitly specified which service is main + match = next((s for s in sitecontainer_specs + if s["service_name"] == main_container_name or + s["container_name"] == main_container_name), None) + if not match: + available = [s["service_name"] for s in sitecontainer_specs] + raise ValidationError( + f"Specified main container '{main_container_name}' not found in compose services. 
" + f"Available services: {available}" + ) + main_svc_name = match["service_name"] + elif len(services_with_ports) == 1: + # Auto-detect: single service with ports → main + main_svc_name = services_with_ports[0] + logger.warning("Auto-detected main container: '%s' (only service with port mapping)", main_svc_name) + elif len(services_with_ports) > 1: + # Multiple services with ports – use the first one but warn + main_svc_name = services_with_ports[0] + all_warnings.append( + f"WARNING: Multiple services have port mappings: {services_with_ports}. " + f"Using '{main_svc_name}' as the main container. " + f"Use --main-container-name to specify a different one." + ) + else: + # No services have ports – use the first service + main_svc_name = sitecontainer_specs[0]["service_name"] + all_warnings.append( + f"WARNING: No services have port mappings. Using '{main_svc_name}' as the main container. " + f"Use --main-container-name to specify a different one. " + f"The main container typically needs a targetPort." + ) + + # ----------------------------------------------------------------------- + # Step 5: Print all collected warnings + # ----------------------------------------------------------------------- + if all_warnings: + logger.warning("") + logger.warning("=" * 70) + logger.warning("CONVERSION WARNINGS AND NOTICES") + logger.warning("=" * 70) + for w in all_warnings: + logger.warning(w) + logger.warning("=" * 70) + logger.warning("") + + # Print networking change notice + logger.warning("IMPORTANT: In Sidecars, all containers share the same network namespace " + "(localhost). 
If your containers previously communicated using Docker Compose " + "service names (e.g., 'http://redis:6379'), you must update them to use " + "'localhost' and ensure each container listens on a unique port.") + + if all_warnings: + logger.warning("") + if not prompt_y_n("Do you want to proceed with the conversion?"): + logger.warning("Conversion aborted.") + return + + # ----------------------------------------------------------------------- + # Step 6: Create/update app settings for environment variables + # ----------------------------------------------------------------------- + if new_app_settings: + logger.warning("Creating %d app setting(s) for environment variable references...", len(new_app_settings)) + settings_list = [f"{k}={v}" for k, v in new_app_settings.items()] + update_app_settings(cmd, resource_group, name, settings_list, slot) + + # ----------------------------------------------------------------------- + # Step 7: Determine auth type (shared across all containers from site config) + # ----------------------------------------------------------------------- + auth_kwargs = {} + if acr_use_managed_identity_creds: + if acr_user_managed_identity_id: + logger.warning("Using User-Assigned Managed Identity for ACR authentication.") + auth_kwargs["user_assigned_identity"] = acr_user_managed_identity_id + else: + logger.warning("Using System-Assigned Managed Identity for ACR authentication.") + auth_kwargs["system_assigned_identity"] = True + elif acr_user_name and acr_user_password: + logger.warning("Using User Credentials for ACR authentication.") + auth_kwargs["registry_username"] = acr_user_name + auth_kwargs["registry_password"] = acr_user_password + else: + logger.warning("Using anonymous access for image pull authentication.") + + # ----------------------------------------------------------------------- + # Step 8: Create sitecontainer resources + # ----------------------------------------------------------------------- + created_containers = [] + 
for spec in sitecontainer_specs: + is_main = spec["service_name"] == main_svc_name + + # Create the SiteContainer directly (not via create_webapp_sitecontainers) + # because environment_variables and volume_mounts are not exposed as + # individual kwargs on the higher-level create function. + auth_type = AuthType.ANONYMOUS + if auth_kwargs.get("system_assigned_identity"): + auth_type = AuthType.SYSTEM_IDENTITY + elif auth_kwargs.get("user_assigned_identity"): + auth_type = AuthType.USER_ASSIGNED + elif auth_kwargs.get("registry_username") and auth_kwargs.get("registry_password"): + auth_type = AuthType.USER_CREDENTIALS + + sitecontainer = SiteContainer( + image=spec["image"], + target_port=spec["target_port"], + start_up_command=spec["startup_command"], + is_main=is_main, + auth_type=auth_type, + user_name=auth_kwargs.get("registry_username"), + password_secret=auth_kwargs.get("registry_password"), + user_managed_identity_client_id=auth_kwargs.get("user_assigned_identity"), + volume_mounts=spec["volume_mounts"], + environment_variables=spec["env_variables"], + # Non-main (sidecar) containers should NOT inherit the webapp's + # app settings and connection strings by default. They receive + # only the env vars explicitly declared in the compose file. 
+ inherit_app_settings_and_connection_strings=None if is_main else False, + ) + + try: + _create_or_update_webapp_sitecontainer_internal( + cmd, name, resource_group, spec["container_name"], sitecontainer, slot + ) + created_containers.append(spec["container_name"]) + logger.warning(" Created sitecontainer '%s'%s", spec["container_name"], + " (main)" if is_main else "") + except Exception as ex: # pylint: disable=broad-exception-caught + # Rollback: delete containers we already created + logger.error("Failed to create sitecontainer '%s': %s", spec["container_name"], str(ex)) + logger.warning("Rolling back: deleting %d already-created container(s)...", len(created_containers)) + for c_name in created_containers: + try: + delete_webapp_sitecontainer(cmd, name, resource_group, c_name, slot) + except Exception: # pylint: disable=broad-exception-caught + pass + raise AzureInternalError( + f"Failed to create sitecontainer '{spec['container_name']}' during compose conversion. " + f"All created containers have been rolled back. 
Error: {ex}" + ) + + # ----------------------------------------------------------------------- + # Step 9: Set linuxFxVersion to SITECONTAINERS + # ----------------------------------------------------------------------- + logger.warning("Setting linuxFxVersion to SITECONTAINERS") + update_site_configs(cmd, resource_group, name, slot=slot, linux_fx_version="SITECONTAINERS") + + logger.warning("") + logger.warning("Webapp '%s' successfully converted from COMPOSE to sitecontainers mode.", name) + logger.warning(" %d sitecontainer(s) created: %s", len(created_containers), ", ".join(created_containers)) + logger.warning(" Main container: '%s'", + next(s["container_name"] for s in sitecontainer_specs if s["service_name"] == main_svc_name)) + if new_app_settings: + logger.warning(" %d app setting(s) created for environment variable references.", len(new_app_settings)) + + def _convert_webapp_to_docker(cmd, name, resource_group, slot): site_config = get_site_configs(cmd, resource_group, name, slot) linux_fx_version = getattr(site_config, "linux_fx_version", None) diff --git a/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-basic.yml b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-basic.yml new file mode 100644 index 00000000000..9a27c61c1a4 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-basic.yml @@ -0,0 +1,10 @@ +# Basic multi-container: web app + redis sidecar +# Tests: auto-detect main (web has ports), simple image extraction, no env/volumes +version: '3' +services: + web: + image: "myacr.azurecr.io/webapp:v1" + ports: + - "8080:8080" + redis: + image: "redis:7-alpine" diff --git a/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-entrypoint-command.yml b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-entrypoint-command.yml new file mode 100644 index 00000000000..7e1f2ce2aaa --- /dev/null 
+++ b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-entrypoint-command.yml @@ -0,0 +1,26 @@ +# Entrypoint and command in various forms +# Tests: entrypoint+command merging, scalar vs sequence formats, startUpCommand generation +version: '3' +services: + web: + image: "myacr.azurecr.io/flask-app:latest" + ports: + - "5000:5000" + # Entrypoint as string, command as string → both merged + entrypoint: "gunicorn" + command: "--bind 0.0.0.0:5000 app:app --workers 4" + worker: + image: "myacr.azurecr.io/celery-worker:latest" + # Entrypoint as list, command as list + entrypoint: ["celery", "-A", "tasks"] + command: ["worker", "--loglevel=info", "--concurrency=2"] + scheduler: + image: "myacr.azurecr.io/celery-worker:latest" + # Only command (no entrypoint) + command: "celery -A tasks beat --loglevel=info" + sidecar: + image: "myacr.azurecr.io/monitoring:v1" + # Only entrypoint (no command) + entrypoint: ["/usr/local/bin/monitor", "--port", "9090"] + ports: + - "9090:9090" diff --git a/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-env-mapping.yml b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-env-mapping.yml new file mode 100644 index 00000000000..5512116ce54 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-env-mapping.yml @@ -0,0 +1,21 @@ +# Environment variables in mapping format (dict) +# Tests: env var extraction, inline values → app settings, mapping format parsing +version: '3' +services: + api: + image: "myacr.azurecr.io/api-server:latest" + ports: + - "3000:3000" + environment: + NODE_ENV: production + DB_HOST: localhost + DB_PORT: "5432" + API_KEY: "s3cret-key-value" + db: + image: "postgres:15-alpine" + ports: + - "5432:5432" + environment: + POSTGRES_USER: admin + POSTGRES_PASSWORD: "p@ssw0rd" + POSTGRES_DB: myapp diff --git 
a/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-env-sequence.yml b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-env-sequence.yml new file mode 100644 index 00000000000..2b00f96bd04 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-env-sequence.yml @@ -0,0 +1,18 @@ +# Environment variables in sequence (list) format +# Tests: env var sequence parsing, value-less entries (app setting reference) +version: '3' +services: + app: + image: "myacr.azurecr.io/myapp:2.0" + ports: + - "80:80" + environment: + - REDIS_URL=redis://localhost:6379 + - APP_SECRET=mysecretvalue + - EXISTING_SETTING + - ANOTHER_SETTING= + cache: + image: "redis:7-alpine" + environment: + - REDIS_MAXMEMORY=256mb + - REDIS_MAXMEMORY_POLICY=allkeys-lru diff --git a/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-full.yml b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-full.yml new file mode 100644 index 00000000000..7380079f502 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-full.yml @@ -0,0 +1,54 @@ +# Full realistic scenario: WordPress + MariaDB + Redis + Nginx reverse proxy +# Tests: multiple services, all field types, volume mapping, env vars, main detection +version: '3.8' +services: + nginx: + image: "myacr.azurecr.io/nginx-proxy:latest" + ports: + - "80:80" + - "443:443" + volumes: + - "${WEBAPP_STORAGE_HOME}/nginx/nginx.conf:/etc/nginx/nginx.conf" + - "${WEBAPP_STORAGE_HOME}/nginx/certs:/etc/nginx/certs" + entrypoint: ["nginx"] + command: ["-g", "daemon off;"] + restart: always + + wordpress: + image: "wordpress:6.4-php8.2-fpm" + ports: + - "9000:9000" + volumes: + - "${WEBAPP_STORAGE_HOME}/site/wwwroot:/var/www/html" + - type: bind + source: "${WEBAPP_STORAGE_HOME}/wordpress/uploads" + target: /var/www/html/wp-content/uploads + environment: + WORDPRESS_DB_HOST: 
"localhost:3306" + WORDPRESS_DB_USER: wp_user + WORDPRESS_DB_PASSWORD: "wp_s3cret" + WORDPRESS_DB_NAME: wordpress + WORDPRESS_TABLE_PREFIX: wp_ + WORDPRESS_CONFIG_EXTRA: | + define('WP_REDIS_HOST', 'localhost'); + define('WP_REDIS_PORT', 6379); + restart: always + + mariadb: + image: "mariadb:11" + ports: + - "3306:3306" + volumes: + - "${WEBAPP_STORAGE_HOME}/mysql/data:/var/lib/mysql" + environment: + - MYSQL_ROOT_PASSWORD=rootpass123 + - MYSQL_DATABASE=wordpress + - MYSQL_USER=wp_user + - MYSQL_PASSWORD=wp_s3cret + command: "--default-authentication-plugin=mysql_native_password" + restart: always + + redis: + image: "redis:7-alpine" + command: "redis-server --maxmemory 128mb --maxmemory-policy allkeys-lru" + restart: always diff --git a/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-multi-port.yml b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-multi-port.yml new file mode 100644 index 00000000000..425dfb68a48 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-multi-port.yml @@ -0,0 +1,14 @@ +# Multiple port mappings per service +# Tests: only first container port used as targetPort, multi-port warning +version: '3' +services: + app: + image: "myacr.azurecr.io/fullstack:latest" + ports: + - "80:80" + - "443:443" + - "8080:8080" + metrics: + image: "prom/prometheus:v2.48.0" + ports: + - "9090:9090" diff --git a/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-no-ports.yml b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-no-ports.yml new file mode 100644 index 00000000000..047ddba440e --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-no-ports.yml @@ -0,0 +1,11 @@ +# No services have port mappings +# Tests: main container fallback to first service, warning about missing targetPort +version: '3' +services: + processor: + image: 
"myacr.azurecr.io/bg-processor:latest" + command: "python worker.py" + environment: + QUEUE_URL: "amqp://localhost:5672" + rabbitmq: + image: "rabbitmq:3-management-alpine" diff --git a/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-port-conflict.yml b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-port-conflict.yml new file mode 100644 index 00000000000..1e43e8ad8f0 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-port-conflict.yml @@ -0,0 +1,12 @@ +# Port conflicts: two services mapping different host ports to same container port +# Tests: CRITICAL port conflict warning, host≠container port warning +version: '3' +services: + frontend: + image: "myacr.azurecr.io/frontend:latest" + ports: + - "80:8080" + backend: + image: "myacr.azurecr.io/backend:latest" + ports: + - "8080:8080" diff --git a/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-underscore-names.yml b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-underscore-names.yml new file mode 100644 index 00000000000..93f551691de --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-underscore-names.yml @@ -0,0 +1,15 @@ +# Service names with underscores, dots, special characters +# Tests: _sanitize_container_name() converting to valid sitecontainer names +version: '3' +services: + my_web_app: + image: "myacr.azurecr.io/web:latest" + ports: + - "80:80" + background.worker: + image: "myacr.azurecr.io/worker:latest" + command: "python worker.py" + data-processor: + image: "myacr.azurecr.io/processor:latest" + UPPERCASE_SVC: + image: "redis:alpine" diff --git a/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-unsupported-bind.yml b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-unsupported-bind.yml new file mode 100644 index 
00000000000..7dcaf8a0dfd --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-unsupported-bind.yml @@ -0,0 +1,17 @@ +# Unsupported bind mounts (not using ${WEBAPP_STORAGE_HOME}) +# Tests: UNSUPPORTED bind mount warning, mixed supported/unsupported volumes +version: '3' +services: + app: + image: "myacr.azurecr.io/app:latest" + ports: + - "8080:8080" + volumes: + # Supported: uses ${WEBAPP_STORAGE_HOME} + - "${WEBAPP_STORAGE_HOME}/site/wwwroot:/app/public" + # Unsupported: absolute host path + - "/var/data:/app/data" + # Unsupported: relative path + - "./config:/app/config" + # Supported: another valid bind mount + - "${WEBAPP_STORAGE_HOME}/logs:/app/logs" diff --git a/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-volumes-bind.yml b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-volumes-bind.yml new file mode 100644 index 00000000000..ab1d37d3086 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-volumes-bind.yml @@ -0,0 +1,23 @@ +# Volumes: bind mounts using ${WEBAPP_STORAGE_HOME} +# Tests: bind mount → /home volumeSubPath mapping, multiple mounts per service +version: '3' +services: + wordpress: + image: "wordpress:6.4-php8.2-apache" + ports: + - "80:80" + volumes: + - "${WEBAPP_STORAGE_HOME}/site/wwwroot:/var/www/html" + - "${WEBAPP_STORAGE_HOME}/wordpress/uploads:/var/www/html/wp-content/uploads" + environment: + WORDPRESS_DB_HOST: localhost + WORDPRESS_DB_NAME: wordpress + mysql: + image: "mysql:8.0" + ports: + - "3306:3306" + volumes: + - "${WEBAPP_STORAGE_HOME}/mysql/data:/var/lib/mysql" + environment: + MYSQL_ROOT_PASSWORD: rootpass + MYSQL_DATABASE: wordpress diff --git a/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-volumes-long.yml b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-volumes-long.yml new file mode 100644 index 
00000000000..477d55a68f6 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-volumes-long.yml @@ -0,0 +1,27 @@ +# Volumes: long syntax (mapping with type/source/target) +# Tests: long syntax bind mount parsing, long syntax volume parsing +version: '3.2' +services: + web: + image: "nginx:1.25-alpine" + ports: + - "80:80" + volumes: + - type: bind + source: "${WEBAPP_STORAGE_HOME}/site/wwwroot" + target: /usr/share/nginx/html + - type: bind + source: "${WEBAPP_STORAGE_HOME}/nginx/conf.d" + target: /etc/nginx/conf.d + read_only: true + api: + image: "myacr.azurecr.io/api:v3" + ports: + - "8080:8080" + volumes: + - type: volume + source: api-cache + target: /tmp/cache + +volumes: + api-cache: diff --git a/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-volumes-named.yml b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-volumes-named.yml new file mode 100644 index 00000000000..01188155901 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/compose-convert-volumes-named.yml @@ -0,0 +1,22 @@ +# Volumes: named volumes (top-level volumes section) +# Tests: named volume → /compose/volumes/ mapping (local ephemeral), top-level volumes parsing, warnings +version: '3' +services: + app: + image: "myacr.azurecr.io/nodeapp:latest" + ports: + - "3000:3000" + volumes: + - app-data:/app/data + - app-logs:/app/logs + mongo: + image: "mongo:7" + ports: + - "27017:27017" + volumes: + - mongo-data:/data/db + +volumes: + app-data: + app-logs: + mongo-data: diff --git a/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/test_compose_convert.py b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/test_compose_convert.py new file mode 100644 index 00000000000..1e3fed31e1f --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/appservice/tests/latest/test_compose_convert.py @@ -0,0 +1,1145 @@ +# 
-------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +""" +Unit tests for the Compose → Sitecontainers conversion helpers in custom.py. + +These tests validate the parsing functions without requiring Azure connectivity. +Run with: python -m pytest -v +""" + +import os +import unittest +from base64 import b64encode +from unittest.mock import MagicMock, patch, call + +import yaml + + +# Import the helpers under test +from azure.cli.command_modules.appservice.custom import ( + _parse_compose_entrypoint_or_command, + _merge_entrypoint_command, + _parse_compose_environment, + _parse_compose_ports, + _parse_compose_volumes, + _make_bind_mount, + _make_named_volume_mount, + _sanitize_container_name, + _convert_compose_to_sitecontainers, + _COMPOSE_WEBAPP_STORAGE_HOME, +) + +SAMPLES_DIR = os.path.dirname(__file__) + + +def _load_sample(filename): + """Load and parse a sample compose YAML from the test directory.""" + path = os.path.join(SAMPLES_DIR, filename) + with open(path, 'r', encoding='utf-8') as f: + return yaml.safe_load(f) + + +def _b64_encode_file(filename): + """Return the base64-encoded contents of a sample file (simulating linuxFxVersion).""" + path = os.path.join(SAMPLES_DIR, filename) + with open(path, 'rb') as f: + return b64encode(f.read()).decode('utf-8') + + +# --------------------------------------------------------------------------- +# _sanitize_container_name +# --------------------------------------------------------------------------- +class TestSanitizeContainerName(unittest.TestCase): + def test_simple_name(self): + self.assertEqual(_sanitize_container_name("web"), "web") + + def test_underscores_replaced(self): + 
self.assertEqual(_sanitize_container_name("my_web_app"), "my-web-app") + + def test_dots_replaced(self): + self.assertEqual(_sanitize_container_name("background.worker"), "background-worker") + + def test_uppercase_lowered(self): + self.assertEqual(_sanitize_container_name("UPPERCASE_SVC"), "uppercase-svc") + + def test_already_valid(self): + self.assertEqual(_sanitize_container_name("data-processor"), "data-processor") + + def test_consecutive_specials_collapsed(self): + self.assertEqual(_sanitize_container_name("a__b..c"), "a-b-c") + + def test_empty_string(self): + self.assertEqual(_sanitize_container_name(""), "container") + + def test_leading_trailing_hyphens_stripped(self): + self.assertEqual(_sanitize_container_name("_leading_"), "leading") + + +# --------------------------------------------------------------------------- +# _parse_compose_entrypoint_or_command +# --------------------------------------------------------------------------- +class TestParseEntrypointOrCommand(unittest.TestCase): + def test_none_returns_empty(self): + self.assertEqual(_parse_compose_entrypoint_or_command(None), []) + + def test_string_split_on_whitespace(self): + result = _parse_compose_entrypoint_or_command("gunicorn --bind 0.0.0.0:5000 app:app") + self.assertEqual(result, ["gunicorn", "--bind", "0.0.0.0:5000", "app:app"]) + + def test_list_preserved(self): + result = _parse_compose_entrypoint_or_command(["celery", "-A", "tasks"]) + self.assertEqual(result, ["celery", "-A", "tasks"]) + + def test_single_word_string(self): + result = _parse_compose_entrypoint_or_command("python") + self.assertEqual(result, ["python"]) + + def test_entrypoint_empty_string(self): + result = _parse_compose_entrypoint_or_command("") + self.assertEqual(result, []) + + +# --------------------------------------------------------------------------- +# _merge_entrypoint_command +# --------------------------------------------------------------------------- +class 
TestMergeEntrypointCommand(unittest.TestCase): + def test_both_empty_returns_none(self): + self.assertIsNone(_merge_entrypoint_command([], [])) + + def test_only_entrypoint(self): + result = _merge_entrypoint_command(["python", "app.py"], []) + self.assertEqual(result, "python app.py") + + def test_only_command(self): + result = _merge_entrypoint_command([], ["--workers", "4"]) + self.assertEqual(result, "--workers 4") + + def test_both_merged(self): + result = _merge_entrypoint_command(["gunicorn"], ["--bind", "0.0.0.0:5000", "app:app"]) + self.assertEqual(result, "gunicorn --bind 0.0.0.0:5000 app:app") + + +# --------------------------------------------------------------------------- +# _parse_compose_environment +# --------------------------------------------------------------------------- +class TestParseComposeEnvironment(unittest.TestCase): + def test_env_none_returns_empty(self): + self.assertEqual(_parse_compose_environment(None), {}) + + def test_mapping_format(self): + env = {"NODE_ENV": "production", "PORT": 3000, "EMPTY": None} + result = _parse_compose_environment(env) + self.assertEqual(result, {"NODE_ENV": "production", "PORT": "3000", "EMPTY": ""}) + + def test_sequence_with_values(self): + env = ["KEY1=value1", "KEY2=value2"] + result = _parse_compose_environment(env) + self.assertEqual(result, {"KEY1": "value1", "KEY2": "value2"}) + + def test_sequence_value_with_equals(self): + """Value itself contains '=' characters.""" + env = ["CONNECTION=Server=localhost;Database=mydb"] + result = _parse_compose_environment(env) + self.assertEqual(result, {"CONNECTION": "Server=localhost;Database=mydb"}) + + def test_sequence_name_only(self): + """No '=' means value-less reference to an app setting.""" + env = ["EXISTING_SETTING"] + result = _parse_compose_environment(env) + self.assertEqual(result, {"EXISTING_SETTING": ""}) + + def test_sequence_empty_value(self): + """Trailing '=' means empty value.""" + env = ["KEY="] + result = 
_parse_compose_environment(env) + self.assertEqual(result, {"KEY": ""}) + + +# --------------------------------------------------------------------------- +# _parse_compose_ports +# --------------------------------------------------------------------------- +class TestParseComposePorts(unittest.TestCase): + def test_ports_none_returns_empty(self): + self.assertEqual(_parse_compose_ports(None), []) + + def test_host_container_mapping(self): + result = _parse_compose_ports(["8080:80", "3306:3306"]) + self.assertEqual(result, [(8080, 80), (3306, 3306)]) + + def test_single_port_no_host(self): + result = _parse_compose_ports(["80"]) + self.assertEqual(result, [(None, 80)]) + + def test_invalid_port_skipped(self): + result = _parse_compose_ports(["abc:xyz", "8080:80"]) + self.assertEqual(result, [(8080, 80)]) + + +# --------------------------------------------------------------------------- +# _parse_compose_volumes +# --------------------------------------------------------------------------- +class TestParseComposeVolumes(unittest.TestCase): + def test_volumes_none_returns_empty(self): + mounts, warnings = _parse_compose_volumes(None, {}) + self.assertEqual(mounts, []) + self.assertEqual(warnings, []) + + def test_bind_mount_short_syntax(self): + volumes = ["${WEBAPP_STORAGE_HOME}/site/wwwroot:/var/www/html"] + mounts, warnings = _parse_compose_volumes(volumes, {}) + self.assertEqual(len(mounts), 1) + self.assertEqual(mounts[0]["volume_sub_path"], "/home/site/wwwroot") + self.assertEqual(mounts[0]["container_mount_path"], "/var/www/html") + self.assertFalse(mounts[0]["read_only"]) + + def test_bind_mount_root_home(self): + """${WEBAPP_STORAGE_HOME} alone (no subpath) maps to /home.""" + volumes = ["${WEBAPP_STORAGE_HOME}:/mnt/home"] + mounts, warnings = _parse_compose_volumes(volumes, {}) + self.assertEqual(len(mounts), 1) + self.assertEqual(mounts[0]["volume_sub_path"], "/home") + self.assertEqual(mounts[0]["container_mount_path"], "/mnt/home") + + def 
test_bind_mount_with_ro(self): + volumes = ["${WEBAPP_STORAGE_HOME}/config:/etc/config:ro"] + mounts, warnings = _parse_compose_volumes(volumes, {}) + self.assertEqual(len(mounts), 1) + self.assertTrue(mounts[0]["read_only"]) + + def test_unsupported_host_path(self): + volumes = ["/var/data:/app/data"] + mounts, warnings = _parse_compose_volumes(volumes, {}) + self.assertEqual(len(mounts), 0) + self.assertTrue(any("UNSUPPORTED" in w for w in warnings)) + + def test_relative_path_unsupported(self): + volumes = ["./config:/app/config"] + mounts, warnings = _parse_compose_volumes(volumes, {}) + self.assertEqual(len(mounts), 0) + self.assertTrue(any("UNSUPPORTED" in w for w in warnings)) + + def test_named_volume(self): + volumes = ["mydata:/app/data"] + mounts, warnings = _parse_compose_volumes(volumes, {"mydata": None}) + self.assertEqual(len(mounts), 1) + self.assertEqual(mounts[0]["volume_sub_path"], "/compose/volumes/mydata") + self.assertEqual(mounts[0]["container_mount_path"], "/app/data") + self.assertTrue(any("Named volume" in w for w in warnings)) + + def test_long_syntax_bind(self): + volumes = [{"type": "bind", "source": "${WEBAPP_STORAGE_HOME}/site/wwwroot", "target": "/app"}] + mounts, warnings = _parse_compose_volumes(volumes, {}) + self.assertEqual(len(mounts), 1) + self.assertEqual(mounts[0]["volume_sub_path"], "/home/site/wwwroot") + self.assertEqual(mounts[0]["container_mount_path"], "/app") + + def test_long_syntax_bind_read_only(self): + volumes = [{ + "type": "bind", + "source": "${WEBAPP_STORAGE_HOME}/conf", + "target": "/etc/conf", + "read_only": True, + }] + mounts, warnings = _parse_compose_volumes(volumes, {}) + self.assertEqual(len(mounts), 1) + self.assertTrue(mounts[0]["read_only"]) + + def test_long_syntax_volume(self): + volumes = [{"type": "volume", "source": "cache-vol", "target": "/tmp/cache"}] + mounts, warnings = _parse_compose_volumes(volumes, {"cache-vol": None}) + self.assertEqual(len(mounts), 1) + 
self.assertEqual(mounts[0]["volume_sub_path"], "/compose/volumes/cache-vol")
+
+    def test_mixed_supported_and_unsupported(self):
+        """Mix of valid bind mount, invalid host path, and named volume."""
+        volumes = [
+            "${WEBAPP_STORAGE_HOME}/site/wwwroot:/app/public",
+            "/var/data:/app/data",
+            "${WEBAPP_STORAGE_HOME}/logs:/app/logs",
+        ]
+        mounts, warnings = _parse_compose_volumes(volumes, {})
+        self.assertEqual(len(mounts), 2)  # Two valid bind mounts
+        self.assertTrue(any("UNSUPPORTED" in w for w in warnings))
+
+
+# ---------------------------------------------------------------------------
+# _make_bind_mount
+# ---------------------------------------------------------------------------
+class TestMakeBindMount(unittest.TestCase):
+    """Unit tests for _make_bind_mount: ${WEBAPP_STORAGE_HOME}-rooted sources map
+    to /home/... sub-paths; any other host path is rejected (None + warning)."""
+
+    def test_standard_path(self):
+        warnings = []
+        result = _make_bind_mount("${WEBAPP_STORAGE_HOME}/site/wwwroot", "/var/www/html", False, warnings)
+        self.assertIsNotNone(result)
+        self.assertEqual(result["volume_sub_path"], "/home/site/wwwroot")
+        self.assertEqual(result["container_mount_path"], "/var/www/html")
+        self.assertEqual(len(warnings), 0)
+
+    def test_root_only(self):
+        warnings = []
+        result = _make_bind_mount("${WEBAPP_STORAGE_HOME}", "/mnt", False, warnings)
+        self.assertEqual(result["volume_sub_path"], "/home")
+
+    def test_non_matching_source_returns_none(self):
+        warnings = []
+        result = _make_bind_mount("/some/host/path", "/app", False, warnings)
+        self.assertIsNone(result)
+        self.assertEqual(len(warnings), 1)
+
+    def test_nested_path(self):
+        warnings = []
+        result = _make_bind_mount(
+            "${WEBAPP_STORAGE_HOME}/deep/nested/path/here", "/container/path", False, warnings
+        )
+        self.assertEqual(result["volume_sub_path"], "/home/deep/nested/path/here")
+
+
+# ---------------------------------------------------------------------------
+# Integration-style: load sample YAML and validate structure
+# ---------------------------------------------------------------------------
+class TestSampleYamlParsing(unittest.TestCase):
+    """Validate that sample compose YAMLs parse correctly with yaml.safe_load."""
+
+    def _load(self, filename):
+        # Thin wrapper over the module-level sample loader.
+        return _load_sample(filename)
+
+    def test_basic_has_two_services(self):
+        compose = self._load("compose-convert-basic.yml")
+        self.assertIn("services", compose)
+        self.assertEqual(len(compose["services"]), 2)
+        self.assertIn("web", compose["services"])
+        self.assertIn("redis", compose["services"])
+
+    def test_env_mapping_format(self):
+        compose = self._load("compose-convert-env-mapping.yml")
+        api_env = compose["services"]["api"]["environment"]
+        self.assertIsInstance(api_env, dict)
+        self.assertEqual(api_env["NODE_ENV"], "production")
+
+    def test_env_sequence_format(self):
+        compose = self._load("compose-convert-env-sequence.yml")
+        app_env = compose["services"]["app"]["environment"]
+        self.assertIsInstance(app_env, list)
+        self.assertIn("REDIS_URL=redis://localhost:6379", app_env)
+
+    def test_volumes_bind_mount(self):
+        compose = self._load("compose-convert-volumes-bind.yml")
+        wp_vols = compose["services"]["wordpress"]["volumes"]
+        self.assertEqual(len(wp_vols), 2)
+        self.assertTrue(wp_vols[0].startswith("${WEBAPP_STORAGE_HOME}"))
+
+    def test_volumes_named(self):
+        compose = self._load("compose-convert-volumes-named.yml")
+        self.assertIn("volumes", compose)
+        self.assertIn("app-data", compose["volumes"])
+
+    def test_volumes_long_syntax(self):
+        compose = self._load("compose-convert-volumes-long.yml")
+        web_vols = compose["services"]["web"]["volumes"]
+        self.assertIsInstance(web_vols[0], dict)
+        self.assertEqual(web_vols[0]["type"], "bind")
+
+    def test_entrypoint_command(self):
+        compose = self._load("compose-convert-entrypoint-command.yml")
+        web = compose["services"]["web"]
+        self.assertEqual(web["entrypoint"], "gunicorn")
+        self.assertEqual(web["command"], "--bind 0.0.0.0:5000 app:app --workers 4")
+        worker = compose["services"]["worker"]
+        self.assertIsInstance(worker["entrypoint"], list)
+        self.assertIsInstance(worker["command"], list)
+
+    def test_port_conflict_file_parses(self):
+        compose = self._load("compose-convert-port-conflict.yml")
+        # Both services have port 8080 as container port
+        fe_ports = compose["services"]["frontend"]["ports"]
+        be_ports = compose["services"]["backend"]["ports"]
+        self.assertEqual(fe_ports, ["80:8080"])
+        self.assertEqual(be_ports, ["8080:8080"])
+
+    def test_full_scenario_service_count(self):
+        compose = self._load("compose-convert-full.yml")
+        self.assertEqual(len(compose["services"]), 4)
+
+    def test_underscore_names_parse(self):
+        compose = self._load("compose-convert-underscore-names.yml")
+        services = list(compose["services"].keys())
+        self.assertIn("my_web_app", services)
+        self.assertIn("background.worker", services)
+        self.assertIn("UPPERCASE_SVC", services)
+
+    def test_no_ports_file_parses(self):
+        compose = self._load("compose-convert-no-ports.yml")
+        # Neither service has ports
+        for svc in compose["services"].values():
+            self.assertNotIn("ports", svc)
+
+    def test_b64_encode_roundtrip(self):
+        """Verify base64 encode/decode preserves the YAML."""
+        from base64 import b64decode
+        b64 = _b64_encode_file("compose-convert-basic.yml")
+        decoded = b64decode(b64.encode('utf-8')).decode('utf-8')
+        compose = yaml.safe_load(decoded)
+        self.assertIn("services", compose)
+        self.assertEqual(len(compose["services"]), 2)
+
+
+# ---------------------------------------------------------------------------
+# End-to-end parsing of volumes from sample files
+# ---------------------------------------------------------------------------
+class TestSampleVolumeParsing(unittest.TestCase):
+    """Parse volumes from sample YAMLs through the actual helper functions."""
+
+    def test_bind_mount_file(self):
+        compose = _load_sample("compose-convert-volumes-bind.yml")
+        wp_vols = compose["services"]["wordpress"]["volumes"]
+        mounts, warnings = _parse_compose_volumes(wp_vols, {})
+        self.assertEqual(len(mounts), 2)
+        self.assertEqual(mounts[0]["volume_sub_path"], "/home/site/wwwroot")
+        self.assertEqual(mounts[0]["container_mount_path"], "/var/www/html")
+        self.assertEqual(mounts[1]["volume_sub_path"], "/home/wordpress/uploads")
+
+    def test_named_volume_file(self):
+        compose = _load_sample("compose-convert-volumes-named.yml")
+        top_vols = compose.get("volumes", {})
+        app_vols = compose["services"]["app"]["volumes"]
+        mounts, warnings = _parse_compose_volumes(app_vols, top_vols)
+        self.assertEqual(len(mounts), 2)
+        self.assertEqual(mounts[0]["volume_sub_path"], "/compose/volumes/app-data")
+        self.assertTrue(any("Named volume" in w for w in warnings))
+
+    def test_long_syntax_file(self):
+        compose = _load_sample("compose-convert-volumes-long.yml")
+        web_vols = compose["services"]["web"]["volumes"]
+        mounts, warnings = _parse_compose_volumes(web_vols, {})
+        self.assertEqual(len(mounts), 2)
+        self.assertEqual(mounts[0]["volume_sub_path"], "/home/site/wwwroot")
+        self.assertEqual(mounts[0]["container_mount_path"], "/usr/share/nginx/html")
+        self.assertTrue(mounts[1]["read_only"])  # second mount has read_only: true
+
+    def test_unsupported_bind_file(self):
+        compose = _load_sample("compose-convert-unsupported-bind.yml")
+        app_vols = compose["services"]["app"]["volumes"]
+        mounts, warnings = _parse_compose_volumes(app_vols, {})
+        # 2 valid ${WEBAPP_STORAGE_HOME} mounts, 2 unsupported
+        self.assertEqual(len(mounts), 2)
+        unsupported_warnings = [w for w in warnings if "UNSUPPORTED" in w]
+        self.assertEqual(len(unsupported_warnings), 2)
+
+    def test_full_scenario_volumes(self):
+        compose = _load_sample("compose-convert-full.yml")
+        # WordPress has short + long syntax volumes
+        wp_vols = compose["services"]["wordpress"]["volumes"]
+        mounts, warnings = _parse_compose_volumes(wp_vols, {})
+        self.assertEqual(len(mounts), 2)
+        paths = [m["volume_sub_path"] for m in mounts]
+        self.assertIn("/home/site/wwwroot", paths)
+        self.assertIn("/home/wordpress/uploads", paths)
+
+
+# ---------------------------------------------------------------------------
+# End-to-end parsing of entrypoint/command from sample files
+# ---------------------------------------------------------------------------
+class TestSampleEntrypointParsing(unittest.TestCase):
+    """Merge entrypoint + command (string or list form) into a single startup string."""
+
+    def test_string_entrypoint_and_command(self):
+        compose = _load_sample("compose-convert-entrypoint-command.yml")
+        web = compose["services"]["web"]
+        ep = _parse_compose_entrypoint_or_command(web.get("entrypoint"))
+        cmd = _parse_compose_entrypoint_or_command(web.get("command"))
+        merged = _merge_entrypoint_command(ep, cmd)
+        self.assertEqual(merged, "gunicorn --bind 0.0.0.0:5000 app:app --workers 4")
+
+    def test_list_entrypoint_and_command(self):
+        compose = _load_sample("compose-convert-entrypoint-command.yml")
+        worker = compose["services"]["worker"]
+        ep = _parse_compose_entrypoint_or_command(worker.get("entrypoint"))
+        cmd = _parse_compose_entrypoint_or_command(worker.get("command"))
+        merged = _merge_entrypoint_command(ep, cmd)
+        self.assertEqual(merged, "celery -A tasks worker --loglevel=info --concurrency=2")
+
+    def test_command_only(self):
+        compose = _load_sample("compose-convert-entrypoint-command.yml")
+        scheduler = compose["services"]["scheduler"]
+        ep = _parse_compose_entrypoint_or_command(scheduler.get("entrypoint"))
+        cmd = _parse_compose_entrypoint_or_command(scheduler.get("command"))
+        merged = _merge_entrypoint_command(ep, cmd)
+        self.assertEqual(merged, "celery -A tasks beat --loglevel=info")
+
+    def test_entrypoint_only(self):
+        compose = _load_sample("compose-convert-entrypoint-command.yml")
+        sidecar = compose["services"]["sidecar"]
+        ep = _parse_compose_entrypoint_or_command(sidecar.get("entrypoint"))
+        cmd = _parse_compose_entrypoint_or_command(sidecar.get("command"))
+        merged = _merge_entrypoint_command(ep, cmd)
+        self.assertEqual(merged, "/usr/local/bin/monitor --port 9090")
+
+
+# ---------------------------------------------------------------------------
+# End-to-end parsing of environment variables from sample files
+# ---------------------------------------------------------------------------
+class TestSampleEnvParsing(unittest.TestCase):
+    """_parse_compose_environment normalizes mapping and sequence forms to a str->str dict."""
+
+    def test_env_mapping_format_from_file(self):
+        compose = _load_sample("compose-convert-env-mapping.yml")
+        env = _parse_compose_environment(compose["services"]["api"]["environment"])
+        self.assertEqual(env["NODE_ENV"], "production")
+        self.assertEqual(env["DB_HOST"], "localhost")
+        self.assertEqual(env["DB_PORT"], "5432")
+
+    def test_sequence_format(self):
+        compose = _load_sample("compose-convert-env-sequence.yml")
+        env = _parse_compose_environment(compose["services"]["app"]["environment"])
+        self.assertEqual(env["REDIS_URL"], "redis://localhost:6379")
+        self.assertEqual(env["EXISTING_SETTING"], "")  # name-only reference
+
+    def test_multiline_value(self):
+        compose = _load_sample("compose-convert-full.yml")
+        env = _parse_compose_environment(compose["services"]["wordpress"]["environment"])
+        # WORDPRESS_CONFIG_EXTRA has multiline YAML value
+        self.assertIn("WP_REDIS_HOST", env["WORDPRESS_CONFIG_EXTRA"])
+
+
+# ---------------------------------------------------------------------------
+# Port parsing from sample files
+# ---------------------------------------------------------------------------
+class TestSamplePortParsing(unittest.TestCase):
+    """_parse_compose_ports returns (host_port, container_port) tuples; [] when absent."""
+
+    def test_basic_ports(self):
+        compose = _load_sample("compose-convert-basic.yml")
+        ports = _parse_compose_ports(compose["services"]["web"].get("ports"))
+        self.assertEqual(ports, [(8080, 8080)])
+
+    def test_port_conflict_different_host_same_container(self):
+        compose = _load_sample("compose-convert-port-conflict.yml")
+        fe_ports = _parse_compose_ports(compose["services"]["frontend"].get("ports"))
+        be_ports = _parse_compose_ports(compose["services"]["backend"].get("ports"))
+        # Both have container port 8080
+        self.assertEqual(fe_ports[0][1], 8080)
+        self.assertEqual(be_ports[0][1], 8080)
+
+    def test_multi_port(self):
+        compose = _load_sample("compose-convert-multi-port.yml")
+        ports = _parse_compose_ports(compose["services"]["app"].get("ports"))
+        self.assertEqual(len(ports), 3)
+        self.assertEqual(ports[0], (80, 80))
+        self.assertEqual(ports[1], (443, 443))
+        self.assertEqual(ports[2], (8080, 8080))
+
+    def test_no_ports(self):
+        compose = _load_sample("compose-convert-no-ports.yml")
+        ports = _parse_compose_ports(compose["services"]["processor"].get("ports"))
+        self.assertEqual(ports, [])
+
+
+# ---------------------------------------------------------------------------
+# _make_named_volume_mount (direct tests)
+# ---------------------------------------------------------------------------
+class TestMakeNamedVolumeMount(unittest.TestCase):
+    """Named volumes map to /compose/volumes/<name> and always emit advisory warnings."""
+
+    def test_basic_named_volume(self):
+        warnings = []
+        result = _make_named_volume_mount("mydata", "/app/data", False, {}, warnings)
+        self.assertIsNotNone(result)
+        self.assertEqual(result["volume_sub_path"], "/compose/volumes/mydata")
+        self.assertEqual(result["container_mount_path"], "/app/data")
+        self.assertFalse(result["read_only"])
+        self.assertTrue(any("Named volume" in w for w in warnings))
+        self.assertTrue(any("ephemeral" in w.lower() or "LOCAL" in w for w in warnings))
+
+    def test_named_volume_read_only(self):
+        warnings = []
+        result = _make_named_volume_mount("cache", "/tmp/cache", True, {}, warnings)
+        self.assertTrue(result["read_only"])
+
+    def test_named_volume_with_top_level(self):
+        warnings = []
+        result = _make_named_volume_mount("pgdata", "/var/lib/pg", False, {"pgdata": None}, warnings)
+        self.assertEqual(result["volume_sub_path"], "/compose/volumes/pgdata")
+
+    def test_named_volume_persistence_warning(self):
+        warnings = []
+        _make_named_volume_mount("data", "/data", False, {}, warnings)
+        self.assertTrue(any("persist" in w.lower() or "restart" in w.lower() for w in warnings))
+
+
+# ---------------------------------------------------------------------------
+# Helpers to build mocked objects for orchestration tests
+# ---------------------------------------------------------------------------
+def _make_compose_b64(compose_dict):
+    """Encode a compose dict as COMPOSE| linuxFxVersion."""
+    yaml_str = yaml.dump(compose_dict, default_flow_style=False)
+    b64 = b64encode(yaml_str.encode('utf-8')).decode('utf-8')
+    return f"COMPOSE|{b64}"
+
+
+def _make_mock_site_config(**kwargs):
+    """Create a mock site_config object."""
+    # Only the two ACR-identity attributes are read by the code under test.
+    config = MagicMock()
+    config.acr_use_managed_identity_creds = kwargs.get("acr_use_managed_identity_creds", None)
+    config.acr_user_managed_identity_id = kwargs.get("acr_user_managed_identity_id", None)
+    return config
+
+
+def _make_mock_cmd(existing_app_settings=None):
+    """Create a mock cmd object with CLI context for ARM calls."""
+    cmd = MagicMock()
+    cmd.cli_ctx = MagicMock()
+    cmd.cli_ctx.cloud.endpoints.resource_manager = "https://management.azure.com"
+
+    # Mock send_raw_request to return existing app settings
+    settings = existing_app_settings or {}
+    response = MagicMock()
+    response.json.return_value = {"properties": settings}
+    return cmd, response
+
+
+# Patch targets in the module under test
+_CUSTOM_MOD = "azure.cli.command_modules.appservice.custom"
+_GET_SUB_ID = "azure.cli.core.commands.client_factory.get_subscription_id"
+
+
+# ---------------------------------------------------------------------------
+# Orchestration: Main container detection
+# ---------------------------------------------------------------------------
+class TestMainContainerDetection(unittest.TestCase):
+    """Test main container auto-detection logic in _convert_compose_to_sitecontainers."""
+
+    def _run_conversion(self, compose_dict, main_container_name=None, existing_settings=None):
+        """Run the conversion with mocked ARM calls and return created sitecontainers."""
+        cmd, raw_response = _make_mock_cmd(existing_settings)
+        site_config = _make_mock_site_config()
+        linux_fx = _make_compose_b64(compose_dict)
+        created = []
+
+        # Capture each sitecontainer the conversion would create, instead of calling ARM.
+        def track_create(cmd, name, rg, container_name, sitecontainer, slot):
+            created.append({
+                "container_name": container_name,
+                "is_main": sitecontainer.is_main,
+                "image": sitecontainer.image,
+                "target_port": sitecontainer.target_port,
+            })
+            return MagicMock()
+
+        with patch(f"{_CUSTOM_MOD}.send_raw_request", return_value=raw_response), \
+             patch(_GET_SUB_ID, return_value="00000000-0000-0000-0000-000000000000"), \
+             patch(f"{_CUSTOM_MOD}._create_or_update_webapp_sitecontainer_internal", side_effect=track_create), \
+             patch(f"{_CUSTOM_MOD}.update_app_settings"), \
+             patch(f"{_CUSTOM_MOD}.update_site_configs"), \
+             patch(f"{_CUSTOM_MOD}.prompt_y_n", return_value=True):
+            _convert_compose_to_sitecontainers(
+                cmd, "testapp", "testrg", None, site_config, linux_fx, main_container_name
+            )
+        return created
+
+    def test_single_service_with_port_is_main(self):
+        compose = {
+            "version": "3",
+            "services": {
+                "web": {"image": "nginx:alpine", "ports": ["80:80"]},
+                "redis": {"image": "redis:alpine"},
+            }
+        }
+        created = self._run_conversion(compose)
+        self.assertEqual(len(created), 2)
+        web = next(c for c in created if c["container_name"] == "web")
+        redis = next(c for c in created if c["container_name"] == "redis")
+        self.assertTrue(web["is_main"])
+        self.assertFalse(redis["is_main"])
+
+    def test_multiple_ports_first_is_main(self):
+        # yaml.dump sorts keys alphabetically, so 'api' < 'web' means 'api' is first
+        compose = {
+            "version": "3",
+            "services": {
+                "api": {"image": "nginx:alpine", "ports": ["80:80"]},
+                "web": {"image": "node:20", "ports": ["8080:8080"]},
+            }
+        }
+        created = self._run_conversion(compose)
+        api = next(c for c in created if c["container_name"] == "api")
+        web = next(c for c in created if c["container_name"] == "web")
+        self.assertTrue(api["is_main"])
+        self.assertFalse(web["is_main"])
+
+    def test_no_ports_first_service_is_main(self):
+        # yaml.dump sorts keys alphabetically, so 'alpha' < 'beta' means 'alpha' is first
+        compose = {
+            "version": "3",
+            "services": {
+                "alpha": {"image": "busybox:latest"},
+                "beta": {"image": "busybox:latest"},
+            }
+        }
+        created = self._run_conversion(compose)
+        alpha = next(c for c in created if c["container_name"] == "alpha")
+        beta = next(c for c in created if c["container_name"] == "beta")
+        self.assertTrue(alpha["is_main"])
+        self.assertFalse(beta["is_main"])
+
+    def test_explicit_main_container_name_by_service(self):
+        compose = {
+            "version": "3",
+            "services": {
+                "frontend": {"image": "nginx:alpine", "ports": ["80:80"]},
+                "backend": {"image": "node:20", "ports": ["8080:8080"]},
+            }
+        }
+        created = self._run_conversion(compose, main_container_name="backend")
+        frontend = next(c for c in created if c["container_name"] == "frontend")
+        backend = next(c for c in created if c["container_name"] == "backend")
+        self.assertFalse(frontend["is_main"])
+        self.assertTrue(backend["is_main"])
+
+    def test_explicit_main_container_name_not_found_raises(self):
+        compose = {
+            "version": "3",
+            "services": {
+                "web": {"image": "nginx:alpine", "ports": ["80:80"]},
+            }
+        }
+        from azure.cli.core.azclierror import ValidationError
+        with self.assertRaises(ValidationError):
+            self._run_conversion(compose, main_container_name="nonexistent")
+
+
+# ---------------------------------------------------------------------------
+# Orchestration: Container name collision
+# ---------------------------------------------------------------------------
+class TestContainerNameCollision(unittest.TestCase):
+    """Sanitized service names must be unique; collisions abort before any ARM write."""
+
+    def test_collision_raises_validation_error(self):
+        """Services 'my_web_app' and 'my.web.app' both sanitize to 'my-web-app'."""
+        compose = {
+            "version": "3",
+            "services": {
+                "my_web_app": {"image": "nginx:alpine", "ports": ["80:80"]},
+                "my.web.app": {"image": "httpd:alpine"},
+            }
+        }
+        cmd, raw_response = _make_mock_cmd()
+        site_config = _make_mock_site_config()
+        linux_fx = _make_compose_b64(compose)
+
+        from azure.cli.core.azclierror import ValidationError
+        with patch(f"{_CUSTOM_MOD}.send_raw_request", return_value=raw_response), \
+             patch(_GET_SUB_ID, return_value="sub-id"), \
+             patch(f"{_CUSTOM_MOD}.prompt_y_n", return_value=True):
+            with self.assertRaises(ValidationError) as ctx:
+                _convert_compose_to_sitecontainers(
+                    cmd, "testapp", "testrg", None, site_config, linux_fx
+                )
+            self.assertIn("collision", str(ctx.exception).lower())
+
+
+# ---------------------------------------------------------------------------
+# Orchestration: Environment variable app setting naming
+# ---------------------------------------------------------------------------
+class TestEnvVarAppSettingNaming(unittest.TestCase):
+    """Compose env vars become COMPOSE_<SERVICE>_<VAR> app settings."""
+
+    def _run_and_capture_settings(self, compose_dict, existing_settings=None):
+        """Run conversion and capture the app settings that would be created."""
+        cmd, raw_response = _make_mock_cmd(existing_settings)
+        site_config = _make_mock_site_config()
+        linux_fx = _make_compose_b64(compose_dict)
+        captured_settings = {}
+
+        # Settings arrive as "KEY=VALUE" strings; split on the first '=' only.
+        def capture_update_settings(cmd, rg, name, settings_list, slot=None):
+            for s in settings_list:
+                key, val = s.split("=", 1)
+                captured_settings[key] = val
+
+        with patch(f"{_CUSTOM_MOD}.send_raw_request", return_value=raw_response), \
+             patch(_GET_SUB_ID, return_value="sub-id"), \
+             patch(f"{_CUSTOM_MOD}._create_or_update_webapp_sitecontainer_internal", return_value=MagicMock()), \
+             patch(f"{_CUSTOM_MOD}.update_app_settings", side_effect=capture_update_settings), \
+             patch(f"{_CUSTOM_MOD}.update_site_configs"), \
+             patch(f"{_CUSTOM_MOD}.prompt_y_n", return_value=True):
+            _convert_compose_to_sitecontainers(
+                cmd, "testapp", "testrg", None, site_config, linux_fx
+            )
+        return captured_settings
+
+    def test_env_vars_create_compose_prefixed_settings(self):
+        compose = {
+            "version": "3",
+            "services": {
+                "web": {
+                    "image": "nginx:alpine",
+                    "ports": ["80:80"],
+                    "environment": {"MY_VAR": "hello", "OTHER": "world"},
+                }
+            }
+        }
+        settings = self._run_and_capture_settings(compose)
+        self.assertIn("COMPOSE_WEB_MY_VAR", settings)
+        self.assertEqual(settings["COMPOSE_WEB_MY_VAR"], "hello")
+        self.assertIn("COMPOSE_WEB_OTHER", settings)
+        self.assertEqual(settings["COMPOSE_WEB_OTHER"], "world")
+
+    def test_env_var_service_name_sanitized_in_key(self):
+        """Service with underscores should have underscores in setting key (not hyphens)."""
+        compose = {
+            "version": "3",
+            "services": {
+                "my-api": {
+                    "image": "node:20",
+                    "ports": ["8080:8080"],
+                    "environment": {"PORT": "8080"},
+                }
+            }
+        }
+        settings = self._run_and_capture_settings(compose)
+        # _sanitize_container_name("my-api") -> "my-api", then upper + replace - with _
+        self.assertIn("COMPOSE_MY_API_PORT", settings)
+
+    def test_valueless_env_var_references_existing_setting(self):
+        """Env var with no value should reference existing app setting directly."""
+        compose = {
+            "version": "3",
+            "services": {
+                "web": {
+                    "image": "nginx:alpine",
+                    "ports": ["80:80"],
+                    "environment": ["EXISTING_KEY"],
+                }
+            }
+        }
+        # EXISTING_KEY already exists as an app setting
+        settings = self._run_and_capture_settings(compose, existing_settings={"EXISTING_KEY": "some-value"})
+        # Should NOT create a COMPOSE_ prefixed setting for it
+        self.assertNotIn("COMPOSE_WEB_EXISTING_KEY", settings)
+
+    def test_valueless_env_var_no_existing_creates_empty(self):
+        """Env var with no value and no existing setting creates empty COMPOSE_ key."""
+        compose = {
+            "version": "3",
+            "services": {
+                "web": {
+                    "image": "nginx:alpine",
+                    "ports": ["80:80"],
+                    "environment": ["MISSING_KEY"],
+                }
+            }
+        }
+        settings = self._run_and_capture_settings(compose, existing_settings={})
+        self.assertIn("COMPOSE_WEB_MISSING_KEY", settings)
+        self.assertEqual(settings["COMPOSE_WEB_MISSING_KEY"], "")
+
+
+# ---------------------------------------------------------------------------
+# Orchestration: Port conflict warnings
+# ---------------------------------------------------------------------------
+class TestPortConflictWarnings(unittest.TestCase):
+    """Conversion must surface port-related caveats via logger.warning."""
+
+    def _run_and_capture_warnings(self, compose_dict):
+        """Run conversion and capture logger warnings."""
+        cmd, raw_response = _make_mock_cmd()
+        site_config = _make_mock_site_config()
+        linux_fx = _make_compose_b64(compose_dict)
+        warnings = []
+
+        # Mirror logger.warning's lazy %-formatting so captured text matches output.
+        def capture_warning(msg, *args):
+            warnings.append(msg % args if args else msg)
+
+        with patch(f"{_CUSTOM_MOD}.send_raw_request", return_value=raw_response), \
+             patch(_GET_SUB_ID, return_value="sub-id"), \
+             patch(f"{_CUSTOM_MOD}._create_or_update_webapp_sitecontainer_internal", return_value=MagicMock()), \
+             patch(f"{_CUSTOM_MOD}.update_app_settings"), \
+             patch(f"{_CUSTOM_MOD}.update_site_configs"), \
+             patch(f"{_CUSTOM_MOD}.logger") as mock_logger, \
+             patch(f"{_CUSTOM_MOD}.prompt_y_n", return_value=True):
+            mock_logger.warning = capture_warning
+            _convert_compose_to_sitecontainers(
+                cmd, "testapp", "testrg", None, site_config, linux_fx
+            )
+        return warnings
+
+    def test_port_conflict_critical_warning(self):
+        compose = {
+            "version": "3",
+            "services": {
+                "frontend": {"image": "nginx:alpine", "ports": ["80:80"]},
+                "backend": {"image": "httpd:alpine", "ports": ["8080:80"]},
+            }
+        }
+        warnings = self._run_and_capture_warnings(compose)
+        critical = [w for w in warnings if "CRITICAL" in w and "80" in w]
+        self.assertTrue(len(critical) > 0, "Expected a CRITICAL port conflict warning for port 80")
+
+    def test_host_container_port_mismatch_warning(self):
+        compose = {
+            "version": "3",
+            "services": {
+                "web": {"image": "nginx:alpine", "ports": ["8080:3000"]},
+            }
+        }
+        warnings = self._run_and_capture_warnings(compose)
+        mismatch = [w for w in warnings if "Host port" in w and "8080" in w and "3000" in w]
+        self.assertTrue(len(mismatch) > 0, "Expected host/container port mismatch warning")
+
+    def test_multiple_ports_warning(self):
+        compose = {
+            "version": "3",
+            "services": {
+                "web": {"image": "nginx:alpine", "ports": ["80:80", "443:443"]},
+            }
+        }
+        warnings = self._run_and_capture_warnings(compose)
+        multi = [w for w in warnings if "Multiple port mappings" in w]
+        self.assertTrue(len(multi) > 0, "Expected multiple port mappings warning")
+
+    def test_unsupported_keys_warning(self):
+        compose = {
+            "version": "3",
+            "services": {
+                "web": {
+                    "image": "nginx:alpine",
+                    "ports": ["80:80"],
+                    "depends_on": ["redis"],
+                    "healthcheck": {"test": "curl http://localhost"},
+                },
+                "redis": {"image": "redis:alpine"},
+            }
+        }
+        warnings = self._run_and_capture_warnings(compose)
+        dep_warnings = [w for w in warnings if "depends_on" in w]
+        hc_warnings = [w for w in warnings if "healthcheck" in w]
+        self.assertTrue(len(dep_warnings) > 0, "Expected depends_on warning")
+        self.assertTrue(len(hc_warnings) > 0, "Expected healthcheck warning")
+
+    def test_networking_notice_always_shown(self):
+        compose = {
+            "version": "3",
+            "services": {
+                "web": {"image": "nginx:alpine", "ports": ["80:80"]},
+            }
+        }
+        warnings = self._run_and_capture_warnings(compose)
+        net = [w for w in warnings if "localhost" in w and "network namespace" in w]
+        self.assertTrue(len(net) > 0, "Expected networking change notice")
+
+
+# ---------------------------------------------------------------------------
+# Orchestration: Rollback on failure
+# ---------------------------------------------------------------------------
+class TestRollbackOnFailure(unittest.TestCase):
+    """A mid-conversion failure must delete already-created containers and leave config untouched."""
+
+    def test_rollback_deletes_created_containers(self):
+        """If creating the 2nd container fails, the 1st should be rolled back."""
+        compose = {
+            "version": "3",
+            "services": {
+                "web": {"image": "nginx:alpine", "ports": ["80:80"]},
+                "sidecar": {"image": "redis:alpine"},
+            }
+        }
+        cmd, raw_response = _make_mock_cmd()
+        site_config = _make_mock_site_config()
+        linux_fx = _make_compose_b64(compose)
+        call_count = [0]
+
+        def fail_on_second(cmd, name, rg, container_name, sitecontainer, slot):
+            call_count[0] += 1
+            if call_count[0] == 2:
+                raise Exception("Simulated ARM failure")
+            return MagicMock()
+
+        from azure.cli.core.azclierror import AzureInternalError
+        with patch(f"{_CUSTOM_MOD}.send_raw_request", return_value=raw_response), \
+             patch(_GET_SUB_ID, return_value="sub-id"), \
+             patch(f"{_CUSTOM_MOD}._create_or_update_webapp_sitecontainer_internal", side_effect=fail_on_second), \
+             patch(f"{_CUSTOM_MOD}.update_app_settings"), \
+             patch(f"{_CUSTOM_MOD}.update_site_configs"), \
+             patch(f"{_CUSTOM_MOD}.delete_webapp_sitecontainer") as mock_delete, \
+             patch(f"{_CUSTOM_MOD}.prompt_y_n", return_value=True):
+            with self.assertRaises(AzureInternalError):
+                _convert_compose_to_sitecontainers(
+                    cmd, "testapp", "testrg", None, site_config, linux_fx
+                )
+        # 'sidecar' < 'web' alphabetically, so sidecar is created first (succeeds),
+        # then web fails. Rollback deletes the already-created 'sidecar'.
+        mock_delete.assert_called_once()
+        delete_args = mock_delete.call_args
+        self.assertEqual(delete_args[0][1], "testapp")
+        self.assertEqual(delete_args[0][3], "sidecar")
+
+    def test_linuxfxversion_not_set_on_failure(self):
+        """If container creation fails, linuxFxVersion should NOT be changed."""
+        compose = {
+            "version": "3",
+            "services": {
+                "web": {"image": "nginx:alpine", "ports": ["80:80"]},
+            }
+        }
+        cmd, raw_response = _make_mock_cmd()
+        site_config = _make_mock_site_config()
+        linux_fx = _make_compose_b64(compose)
+
+        from azure.cli.core.azclierror import AzureInternalError
+        with patch(f"{_CUSTOM_MOD}.send_raw_request", return_value=raw_response), \
+             patch(_GET_SUB_ID, return_value="sub-id"), \
+             patch(f"{_CUSTOM_MOD}._create_or_update_webapp_sitecontainer_internal",
+                   side_effect=Exception("ARM failure")), \
+             patch(f"{_CUSTOM_MOD}.update_app_settings"), \
+             patch(f"{_CUSTOM_MOD}.update_site_configs") as mock_update_config, \
+             patch(f"{_CUSTOM_MOD}.delete_webapp_sitecontainer"), \
+             patch(f"{_CUSTOM_MOD}.prompt_y_n", return_value=True):
+            with self.assertRaises(AzureInternalError):
+                _convert_compose_to_sitecontainers(
+                    cmd, "testapp", "testrg", None, site_config, linux_fx
+                )
+        mock_update_config.assert_not_called()
+
+
+# ---------------------------------------------------------------------------
+# Orchestration: Auth type detection
+# ---------------------------------------------------------------------------
+class TestAuthTypeDetection(unittest.TestCase):
+    """Registry auth type is derived from app settings / ACR identity config
+    and applied uniformly to every created sitecontainer."""
+
+    def _run_and_capture_auth(self, compose_dict, site_config_kwargs=None, existing_settings=None):
+        """Run conversion and capture the auth_type used for each container."""
+        cmd, raw_response = _make_mock_cmd(existing_settings)
+        site_config = _make_mock_site_config(**(site_config_kwargs or {}))
+        linux_fx = _make_compose_b64(compose_dict)
+        created = []
+
+        def track_create(cmd, name, rg, container_name, sitecontainer, slot):
+            created.append({
+                "container_name": container_name,
+                "auth_type": sitecontainer.auth_type,
+                "user_name": sitecontainer.user_name,
+                "user_managed_identity_client_id": sitecontainer.user_managed_identity_client_id,
+            })
+            return MagicMock()
+
+        with patch(f"{_CUSTOM_MOD}.send_raw_request", return_value=raw_response), \
+             patch(_GET_SUB_ID, return_value="sub-id"), \
+             patch(f"{_CUSTOM_MOD}._create_or_update_webapp_sitecontainer_internal", side_effect=track_create), \
+             patch(f"{_CUSTOM_MOD}.update_app_settings"), \
+             patch(f"{_CUSTOM_MOD}.update_site_configs"), \
+             patch(f"{_CUSTOM_MOD}.prompt_y_n", return_value=True):
+            _convert_compose_to_sitecontainers(
+                cmd, "testapp", "testrg", None, site_config, linux_fx
+            )
+        return created
+
+    def test_anonymous_when_no_credentials(self):
+        compose = {"version": "3", "services": {"web": {"image": "nginx:alpine", "ports": ["80:80"]}}}
+        created = self._run_and_capture_auth(compose)
+        self.assertEqual(created[0]["auth_type"], "Anonymous")
+
+    def test_user_credentials_from_app_settings(self):
+        compose = {"version": "3", "services": {"web": {"image": "myacr.azurecr.io/app:v1", "ports": ["80:80"]}}}
+        created = self._run_and_capture_auth(compose, existing_settings={
+            "DOCKER_REGISTRY_SERVER_USERNAME": "myuser",
+            "DOCKER_REGISTRY_SERVER_PASSWORD": "mypass",
+        })
+        self.assertEqual(created[0]["auth_type"], "UserCredentials")
+        self.assertEqual(created[0]["user_name"], "myuser")
+
+    def test_system_identity(self):
+        compose = {"version": "3", "services": {"web": {"image": "myacr.azurecr.io/app:v1", "ports": ["80:80"]}}}
+        created = self._run_and_capture_auth(compose, site_config_kwargs={
+            "acr_use_managed_identity_creds": True,
+        })
+        self.assertEqual(created[0]["auth_type"], "SystemIdentity")
+
+    def test_user_assigned_managed_identity(self):
+        compose = {"version": "3", "services": {"web": {"image": "myacr.azurecr.io/app:v1", "ports": ["80:80"]}}}
+        created = self._run_and_capture_auth(compose, site_config_kwargs={
+            "acr_use_managed_identity_creds": True,
+            "acr_user_managed_identity_id": "client-id-123",
+        })
+        self.assertEqual(created[0]["auth_type"], "UserAssigned")
+        self.assertEqual(created[0]["user_managed_identity_client_id"], "client-id-123")
+
+    def test_auth_shared_across_all_containers(self):
+        compose = {
+            "version": "3",
+            "services": {
+                "web": {"image": "myacr.azurecr.io/web:v1", "ports": ["80:80"]},
+                "worker": {"image": "myacr.azurecr.io/worker:v1"},
+            }
+        }
+        created = self._run_and_capture_auth(compose, existing_settings={
+            "DOCKER_REGISTRY_SERVER_USERNAME": "user",
+            "DOCKER_REGISTRY_SERVER_PASSWORD": "pass",
+        })
+        self.assertEqual(len(created), 2)
+        for c in created:
+            self.assertEqual(c["auth_type"], "UserCredentials")
+
+
+# ---------------------------------------------------------------------------
+# Orchestration: Invalid compose input
+# ---------------------------------------------------------------------------
+class TestInvalidComposeInput(unittest.TestCase):
+    """Malformed COMPOSE| payloads must fail fast with ValidationError."""
+
+    def _run(self, linux_fx):
+        # Minimal mocked run: validation should raise before any write path is reached.
+        cmd, raw_response = _make_mock_cmd()
+        site_config = _make_mock_site_config()
+        with patch(f"{_CUSTOM_MOD}.send_raw_request", return_value=raw_response), \
+             patch(_GET_SUB_ID, return_value="sub-id"):
+            _convert_compose_to_sitecontainers(
+                cmd, "testapp", "testrg", None, site_config, linux_fx
+            )
+
+    def test_invalid_base64_raises(self):
+        from azure.cli.core.azclierror import ValidationError
+        with self.assertRaises(ValidationError) as ctx:
+            self._run("COMPOSE|!!!not-base64!!!")
+        self.assertIn("base64", str(ctx.exception).lower())
+
+    def test_missing_services_raises(self):
+        from azure.cli.core.azclierror import ValidationError
+        compose = {"version": "3"}  # no services key
+        linux_fx = _make_compose_b64(compose)
+        with self.assertRaises(ValidationError) as ctx:
+            self._run(linux_fx)
+        self.assertIn("services", str(ctx.exception).lower())
+
+    def test_empty_services_raises(self):
+        from azure.cli.core.azclierror import ValidationError
+        compose = {"version": "3", "services": {}}
+        linux_fx = _make_compose_b64(compose)
+        with self.assertRaises(ValidationError) as ctx:
+            self._run(linux_fx)
+        self.assertIn("no services", str(ctx.exception).lower())
+
+    def test_service_without_image_raises(self):
+        from azure.cli.core.azclierror import ValidationError
+        compose = {"version": "3", "services": {"web": {"ports": ["80:80"]}}}
+        linux_fx = _make_compose_b64(compose)
+        with self.assertRaises(ValidationError) as ctx:
+            self._run(linux_fx)
+        self.assertIn("image", str(ctx.exception).lower())
+
+
+# ---------------------------------------------------------------------------
+# Orchestration: linuxFxVersion set to SITECONTAINERS on success
+# ---------------------------------------------------------------------------
+class TestLinuxFxVersionSet(unittest.TestCase):
+    """On a fully successful conversion, linuxFxVersion flips to SITECONTAINERS exactly once."""
+
+    def test_sitecontainers_fx_version_set_on_success(self):
+        compose = {
+            "version": "3",
+            "services": {"web": {"image": "nginx:alpine", "ports": ["80:80"]}}
+        }
+        cmd, raw_response = _make_mock_cmd()
+        site_config = _make_mock_site_config()
+        linux_fx = _make_compose_b64(compose)
+
+        with patch(f"{_CUSTOM_MOD}.send_raw_request", return_value=raw_response), \
+             patch(_GET_SUB_ID, return_value="sub-id"), \
+             patch(f"{_CUSTOM_MOD}._create_or_update_webapp_sitecontainer_internal", return_value=MagicMock()), \
+             patch(f"{_CUSTOM_MOD}.update_app_settings"), \
+             patch(f"{_CUSTOM_MOD}.update_site_configs") as mock_update_config, \
+             patch(f"{_CUSTOM_MOD}.prompt_y_n", return_value=True):
+            _convert_compose_to_sitecontainers(
+                cmd, "testapp", "testrg", None, site_config, linux_fx
+            )
+        mock_update_config.assert_called_once_with(
+            cmd, "testrg", "testapp", slot=None, linux_fx_version="SITECONTAINERS"
+        )
+
+
+if __name__ == '__main__':
+    unittest.main()