diff --git a/src/aks-sreclaw/.gitignore b/src/aks-sreclaw/.gitignore
new file mode 100644
index 00000000000..3b4f4680dd2
--- /dev/null
+++ b/src/aks-sreclaw/.gitignore
@@ -0,0 +1,3 @@
+# Ignore Poetry artifacts
+poetry.lock
+pyproject.toml
diff --git a/src/aks-sreclaw/HISTORY.rst b/src/aks-sreclaw/HISTORY.rst
new file mode 100644
index 00000000000..4edaa7ca09d
--- /dev/null
+++ b/src/aks-sreclaw/HISTORY.rst
@@ -0,0 +1,17 @@
+.. :changelog:
+
+Release History
+===============
+
+Guidance
+++++++++
+If there is no rush to release a new version, please just add a description of the modification under the *Pending* section.
+
+To release a new version, please select a new version number (usually plus 1 to last patch version, X.Y.Z -> Major.Minor.Patch, more details in `Semantic Versioning <https://semver.org/>`_), and then add a new section named as the new version number in this file, the content should include the new modifications and everything from the *Pending* section. Finally, update the `VERSION` variable in `setup.py` with this new version number.
+
+Pending
++++++++
+
+1.0.0b1
++++++++
+* Add AKS SREClaw `az aks claw`.
diff --git a/src/aks-sreclaw/README.rst b/src/aks-sreclaw/README.rst
new file mode 100644
index 00000000000..3510edf40c8
--- /dev/null
+++ b/src/aks-sreclaw/README.rst
@@ -0,0 +1,186 @@
+Azure CLI AKS SREClaw Extension
+================================
+
+This extension provides commands to manage AKS SREClaw, an autonomous AI-powered troubleshooting assistant for Azure Kubernetes Service clusters.
+
+Installation
+------------
+
+To install the extension:
+
+.. code-block:: bash
+
+ az extension add --name aks-sreclaw
+
+Usage
+-----
+
+Deploy SREClaw to your AKS cluster
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Initialize and deploy SREClaw with interactive LLM configuration:
+
+.. code-block:: bash
+
+ az aks claw create --resource-group MyResourceGroup --name MyAKSCluster --namespace kube-system
+
+This command will:
+
+1. Prompt you to select an LLM provider (Azure OpenAI or OpenAI)
+2. Guide you through entering model names and API credentials
+3. Validate the connection to your LLM provider
+4. Prompt for a Kubernetes service account name
+5. Deploy the SREClaw helm chart to your cluster
+6. Wait for pods to be ready (up to 5 minutes)
+
+Deploy without waiting for completion
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: bash
+
+ az aks claw create --resource-group MyResourceGroup --name MyAKSCluster --namespace kube-system --no-wait
+
+Check deployment status
+~~~~~~~~~~~~~~~~~~~~~~~
+
+View the current status of your SREClaw deployment:
+
+.. code-block:: bash
+
+ az aks claw status --resource-group MyResourceGroup --name MyAKSCluster --namespace kube-system
+
+This displays:
+
+- Helm release status
+- Deployment replica counts
+- Pod status and readiness
+- Configured LLM providers with models and API endpoints
+
+Connect to SREClaw service
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Establish a port-forward connection to access the SREClaw web interface:
+
+.. code-block:: bash
+
+ az aks claw connect --resource-group MyResourceGroup --name MyAKSCluster --namespace kube-system
+
+The command will:
+
+- Display the gateway authentication token
+- Create a port-forward to localhost:18789
+- Provide instructions to open the service in your browser
+
+To use a different local port:
+
+.. code-block:: bash
+
+ az aks claw connect --resource-group MyResourceGroup --name MyAKSCluster --namespace kube-system --local-port 8080
+
+Press Ctrl+C to stop the port-forwarding.
+
+Delete SREClaw
+~~~~~~~~~~~~~~
+
+Uninstall SREClaw and clean up all resources:
+
+.. code-block:: bash
+
+ az aks claw delete --resource-group MyResourceGroup --name MyAKSCluster --namespace kube-system
+
+This command will:
+
+1. Prompt for confirmation
+2. Uninstall the SREClaw helm chart
+3. Delete all associated secrets and configurations
+4. Wait for pods to be removed
+
+To delete without waiting:
+
+.. code-block:: bash
+
+ az aks claw delete --resource-group MyResourceGroup --name MyAKSCluster --namespace kube-system --no-wait
+
+LLM Provider Configuration
+---------------------------
+
+Azure OpenAI
+~~~~~~~~~~~~
+
+When prompted during deployment, select Azure OpenAI and provide:
+
+- **Models**: Comma-separated model names (e.g., ``gpt-5.4,gpt-5.1``)
+- **API Key**: Your Azure OpenAI API key
+- **API Base**: Your Azure OpenAI endpoint (e.g., ``https://YOUR-RESOURCE-NAME.openai.azure.com/openai/v1/``)
+
+OpenAI
+~~~~~~
+
+When prompted during deployment, select OpenAI and provide:
+
+- **Models**: Comma-separated model names (e.g., ``gpt-5.4,gpt-5.1``)
+- **API Key**: Your OpenAI API key
+
+Prerequisites
+-------------
+
+- Azure CLI installed
+- An AKS cluster
+- kubectl configured to access your cluster
+- Appropriate permissions to deploy resources to your AKS cluster
+- An LLM provider account (Azure OpenAI or OpenAI) with API access
+
+Service Account Requirements
+-----------------------------
+
+SREClaw requires a Kubernetes service account with:
+
+- Appropriate Role and RoleBinding in the target namespace
+- For Azure resource access: annotation with ``azure.workload.identity/client-id: <your-managed-identity-client-id>``
+
+Ensure you create these before running ``az aks claw create``.
+
+Troubleshooting
+---------------
+
+Check deployment status
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: bash
+
+ az aks claw status --resource-group MyResourceGroup --name MyAKSCluster --namespace kube-system
+
+View pod logs
+~~~~~~~~~~~~~
+
+.. code-block:: bash
+
+ kubectl logs -n kube-system -l app.kubernetes.io/name=aks-sreclaw
+
+Verify helm release
+~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: bash
+
+ helm list -n kube-system
+
+Uninstall and reinstall
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you encounter issues:
+
+.. code-block:: bash
+
+ az aks claw delete --resource-group MyResourceGroup --name MyAKSCluster --namespace kube-system
+ az aks claw create --resource-group MyResourceGroup --name MyAKSCluster --namespace kube-system
+
+Support
+-------
+
+For issues and feature requests, please visit:
+https://github.com/Azure/azure-cli-extensions
+
+License
+-------
+
+This extension is licensed under the MIT License. See LICENSE.txt for details.
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/__init__.py b/src/aks-sreclaw/azext_aks_sreclaw/__init__.py
new file mode 100644
index 00000000000..f7e9ca91329
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/__init__.py
@@ -0,0 +1,45 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+# pylint: disable=unused-import
+import azext_aks_sreclaw._help
+from azext_aks_sreclaw._client_factory import CUSTOM_MGMT_AKS
+from azure.cli.core import AzCommandsLoader
+from azure.cli.core.profiles import register_resource_type
+
+
+def register_aks_sreclaw_resource_type():
+ register_resource_type(
+ "latest",
+ CUSTOM_MGMT_AKS,
+ None,
+ )
+
+
+class ContainerServiceCommandsLoader(AzCommandsLoader):
+
+ def __init__(self, cli_ctx=None):
+ from azure.cli.core.commands import CliCommandType
+ register_aks_sreclaw_resource_type()
+
+ aks_sreclaw_custom = CliCommandType(operations_tmpl='azext_aks_sreclaw.custom#{}')
+ super().__init__(
+ cli_ctx=cli_ctx,
+ custom_command_type=aks_sreclaw_custom,
+ )
+
+ def load_command_table(self, args):
+ super().load_command_table(args)
+ from azext_aks_sreclaw.commands import load_command_table
+ load_command_table(self, args)
+ return self.command_table
+
+ def load_arguments(self, command):
+ super().load_arguments(command)
+ from azext_aks_sreclaw._params import load_arguments
+ load_arguments(self, command)
+
+
+COMMAND_LOADER_CLS = ContainerServiceCommandsLoader
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/_client_factory.py b/src/aks-sreclaw/azext_aks_sreclaw/_client_factory.py
new file mode 100644
index 00000000000..a6b657da2ec
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/_client_factory.py
@@ -0,0 +1,23 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+from azure.cli.core.commands.client_factory import get_mgmt_service_client
+from azure.cli.core.profiles import CustomResourceType
+
+CUSTOM_MGMT_AKS = CustomResourceType('azext_aks_sreclaw.vendored_sdks.azure_mgmt_containerservice.2025_10_01',
+ 'ContainerServiceClient')
+
+# Note: cf_xxx is used as the client_factory option value of a command group at command declaration and should ignore
+# parameters other than cli_ctx; get_xxx_client is used as the client of other services in the command implementation,
+# and usually accepts subscription_id as a parameter to reconfigure the subscription when sending the request.
+
+
+# container service clients
+def get_container_service_client(cli_ctx, subscription_id=None):
+ return get_mgmt_service_client(cli_ctx, CUSTOM_MGMT_AKS, subscription_id=subscription_id)
+
+
+def cf_managed_clusters(cli_ctx, *_):
+ return get_container_service_client(cli_ctx).managed_clusters
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/_consts.py b/src/aks-sreclaw/azext_aks_sreclaw/_consts.py
new file mode 100644
index 00000000000..385edab7d3a
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/_consts.py
@@ -0,0 +1,29 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+import os
+
+# Configuration paths
+home_dir = os.path.expanduser("~")
+
+AGENT_NAMESPACE = "kube-system"
+AKS_SRECLAW_LABEL_SELECTOR = "app.kubernetes.io/name=aks-sreclaw"
+
+# Kubernetes WebSocket exec protocol constants
+RESIZE_CHANNEL = 4 # WebSocket channel for terminal resize messages
+# WebSocket heartbeat configuration (matching kubectl client-go)
+# Based on kubernetes/client-go/tools/remotecommand/websocket.go#L59-L65
+# pingPeriod = 5 * time.Second
+# pingReadDeadline = (pingPeriod * 12) + (1 * time.Second)
+# The read deadline is calculated to allow up to 12 missed pings plus 1 second buffer
+# This provides tolerance for network delays while detecting actual connection failures
+HEARTBEAT_INTERVAL = 5.0 # pingPeriod: 5 seconds between pings
+HEARTBEAT_TIMEOUT = (HEARTBEAT_INTERVAL * 12) + 1 # pingReadDeadline: 61 seconds total timeout
+
+# AKS SREClaw Version (shared by helm chart and docker image)
+AKS_SRECLAW_VERSION = "0.0.0"
+
+# Helm Configuration
+HELM_VERSION = "3.16.0"
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/_help.py b/src/aks-sreclaw/azext_aks_sreclaw/_help.py
new file mode 100644
index 00000000000..688fddeac48
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/_help.py
@@ -0,0 +1,137 @@
+# coding=utf-8
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+from knack.help_files import helps
+
+helps[
+ "aks claw"
+] = """
+ type: group
+ short-summary: Commands to manage Openclaw-powered SREClaw in a managed Kubernetes cluster.
+"""
+
+helps[
+ "aks claw create"
+] = """
+ type: command
+ short-summary: Initialize and deploy SREClaw to an AKS cluster.
+ long-summary: |-
+ This command deploys the SREClaw helm chart to your AKS cluster and guides you through
+ configuring an LLM provider. The command will prompt you to select an LLM provider
+ (Azure OpenAI or OpenAI), enter model names and API credentials, validate the connection,
+ and configure a Kubernetes service account.
+ parameters:
+ - name: --name -n
+ type: string
+ short-summary: Name of the managed cluster.
+ - name: --resource-group -g
+ type: string
+ short-summary: Name of the resource group.
+ - name: --namespace
+ type: string
+ short-summary: The Kubernetes namespace where SREClaw will be deployed.
+ long-summary: Required parameter. Specify the namespace for SREClaw deployment.
+ - name: --no-wait
+ type: bool
+ short-summary: Do not wait for the long-running operation to finish.
+ examples:
+ - name: Deploy SREClaw to kube-system namespace
+ text: |-
+ az aks claw create --resource-group myResourceGroup --name myAKSCluster --namespace kube-system
+ - name: Deploy SREClaw without waiting for completion
+ text: |-
+ az aks claw create --resource-group myResourceGroup --name myAKSCluster --namespace my-namespace --no-wait
+"""
+
+helps[
+ "aks claw delete"
+] = """
+ type: command
+ short-summary: Delete and uninstall SREClaw from an AKS cluster.
+ long-summary: |-
+ This command uninstalls the SREClaw helm chart and deletes all associated resources
+ from your AKS cluster, including secrets and configurations.
+ parameters:
+ - name: --name -n
+ type: string
+ short-summary: Name of the managed cluster.
+ - name: --resource-group -g
+ type: string
+ short-summary: Name of the resource group.
+ - name: --namespace
+ type: string
+ short-summary: The Kubernetes namespace where SREClaw is deployed.
+ long-summary: Required parameter. Specify the namespace where SREClaw is deployed.
+ - name: --no-wait
+ type: bool
+ short-summary: Do not wait for the long-running operation to finish.
+ examples:
+ - name: Delete SREClaw from kube-system namespace
+ text: |-
+ az aks claw delete --resource-group myResourceGroup --name myAKSCluster --namespace kube-system
+ - name: Delete SREClaw without waiting for completion
+ text: |-
+ az aks claw delete --resource-group myResourceGroup --name myAKSCluster --namespace my-namespace --no-wait
+"""
+
+helps[
+ "aks claw connect"
+] = """
+ type: command
+ short-summary: Establish a port-forward connection to the SREClaw service.
+ long-summary: |-
+ This command creates a port-forward to the aks-sreclaw service, making it accessible
+ on localhost. The command displays the gateway token needed for authentication and
+ provides instructions to open the service in a browser. Press Ctrl+C to stop.
+ parameters:
+ - name: --name -n
+ type: string
+ short-summary: Name of the managed cluster.
+ - name: --resource-group -g
+ type: string
+ short-summary: Name of the resource group.
+ - name: --namespace
+ type: string
+ short-summary: The Kubernetes namespace where the aks-sreclaw service is deployed.
+ long-summary: Required parameter. Specify the namespace where SREClaw is deployed.
+ - name: --local-port
+ type: int
+ short-summary: Local port to use for port-forwarding.
+ long-summary: Defaults to 18789 if not specified.
+ examples:
+ - name: Connect to SREClaw service on default port
+ text: |-
+ az aks claw connect --resource-group myResourceGroup --name myAKSCluster --namespace kube-system
+ - name: Connect to SREClaw service on custom port
+ text: |-
+ az aks claw connect --resource-group myResourceGroup --name myAKSCluster --namespace kube-system --local-port 8080
+"""
+
+helps[
+ "aks claw status"
+] = """
+ type: command
+ short-summary: Display the status of the SREClaw deployment.
+ long-summary: |-
+ This command shows the current status of the SREClaw deployment including helm release
+ status, deployment replica counts, pod status and readiness, and configured LLM providers
+ with their models and API endpoints.
+ parameters:
+ - name: --name -n
+ type: string
+ short-summary: Name of the managed cluster.
+ - name: --resource-group -g
+ type: string
+ short-summary: Name of the resource group.
+ - name: --namespace
+ type: string
+ short-summary: The Kubernetes namespace where SREClaw is deployed.
+ long-summary: Required parameter. Specify the namespace where SREClaw is deployed.
+ examples:
+ - name: Check SREClaw deployment status
+ text: |-
+ az aks claw status --resource-group myResourceGroup --name myAKSCluster --namespace kube-system
+"""
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/_params.py b/src/aks-sreclaw/azext_aks_sreclaw/_params.py
new file mode 100644
index 00000000000..42cbb621e38
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/_params.py
@@ -0,0 +1,91 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+# pylint: disable=too-many-statements,too-many-lines
+def load_arguments(self, _):
+ with self.argument_context("aks claw create") as c:
+ c.argument(
+ "resource_group_name",
+ options_list=["--resource-group", "-g"],
+ help="Name of resource group.",
+ )
+ c.argument(
+ "cluster_name",
+ options_list=["--name", "-n"],
+ help="Name of the managed cluster.",
+ )
+ c.argument(
+ "namespace",
+ options_list=["--namespace"],
+ help="The Kubernetes namespace where the sreclaw will be deployed.",
+ required=True,
+ )
+
+ with self.argument_context("aks claw connect") as c:
+ c.argument(
+ "resource_group_name",
+ options_list=["--resource-group", "-g"],
+ help="Name of resource group.",
+ )
+ c.argument(
+ "cluster_name",
+ options_list=["--name", "-n"],
+ help="Name of the managed cluster.",
+ )
+ c.argument(
+ "namespace",
+ options_list=["--namespace"],
+ help="The Kubernetes namespace where the openclaw service is deployed.",
+ required=True,
+ )
+ c.argument(
+ "local_port",
+ options_list=["--local-port"],
+ type=int,
+ help="Local port to use for port-forwarding. Defaults to 18789.",
+ required=False,
+ )
+
+ with self.argument_context("aks claw delete") as c:
+ c.argument(
+ "resource_group_name",
+ options_list=["--resource-group", "-g"],
+ help="Name of resource group.",
+ )
+ c.argument(
+ "cluster_name",
+ options_list=["--name", "-n"],
+ help="Name of the managed cluster.",
+ )
+ c.argument(
+ "namespace",
+ options_list=["--namespace"],
+ help="The Kubernetes namespace where the openclaw service is deployed.",
+ required=True,
+ )
+ c.argument(
+ "yes",
+ options_list=["--yes", "-y"],
+ action="store_true",
+ help="Do not prompt for confirmation.",
+ )
+
+ with self.argument_context("aks claw status") as c:
+ c.argument(
+ "resource_group_name",
+ options_list=["--resource-group", "-g"],
+ help="Name of resource group.",
+ )
+ c.argument(
+ "cluster_name",
+ options_list=["--name", "-n"],
+ help="Name of the managed cluster.",
+ )
+ c.argument(
+ "namespace",
+ options_list=["--namespace"],
+ help="The Kubernetes namespace where the openclaw service is deployed.",
+ required=True,
+ )
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/azext_metadata.json b/src/aks-sreclaw/azext_aks_sreclaw/azext_metadata.json
new file mode 100644
index 00000000000..3484d75a0c2
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/azext_metadata.json
@@ -0,0 +1,5 @@
+{
+ "azext.minCliCoreVersion": "2.76.0",
+ "azext.isPreview": true,
+ "name": "aks-sreclaw"
+}
\ No newline at end of file
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/commands.py b/src/aks-sreclaw/azext_aks_sreclaw/commands.py
new file mode 100644
index 00000000000..35754694f20
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/commands.py
@@ -0,0 +1,31 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+from azext_aks_sreclaw._client_factory import cf_managed_clusters
+from azure.cli.core.commands import CliCommandType
+from knack.log import get_logger
+
+logger = get_logger(__name__)
+
+
+# pylint: disable=too-many-statements
+def load_command_table(self, _):
+ managed_clusters_sdk = CliCommandType(
+ operations_tmpl="azext_aks_sreclaw.vendored_sdks.azure_mgmt_containerservice.2025_10_01."
+ "operations._managed_clusters_operations#ManagedClustersOperations.{}",
+ operation_group="managed_clusters",
+ client_factory=cf_managed_clusters,
+ )
+
+ with self.command_group(
+ "aks",
+ managed_clusters_sdk,
+ client_factory=cf_managed_clusters,
+
+ ) as g:
+ g.custom_command("claw create", "aks_sreclaw_create", supports_no_wait=True)
+ g.custom_command("claw delete", "aks_sreclaw_delete", supports_no_wait=True)
+ g.custom_command("claw connect", "aks_sreclaw_connect")
+ g.custom_command("claw status", "aks_sreclaw_status")
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/custom.py b/src/aks-sreclaw/azext_aks_sreclaw/custom.py
new file mode 100644
index 00000000000..a47332bc50a
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/custom.py
@@ -0,0 +1,461 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+# pylint: disable=too-many-lines, broad-except, line-too-long
+
+from azext_aks_sreclaw.sreclaw.aks import get_aks_credentials
+from azext_aks_sreclaw.sreclaw.console import (
+ ERROR_COLOR,
+ HELP_COLOR,
+ INFO_COLOR,
+ SUCCESS_COLOR,
+ WARNING_COLOR,
+ get_console,
+)
+from azext_aks_sreclaw.sreclaw.k8s import AKSSREClawManager
+from azext_aks_sreclaw.sreclaw.k8s.aks_sreclaw_manager import (
+ AKSSREClawManagerLLMConfigBase,
+)
+from azext_aks_sreclaw.sreclaw.llm_providers import prompt_provider_choice
+from azext_aks_sreclaw.sreclaw.telemetry import CLITelemetryClient
+from azure.cli.core.azclierror import AzCLIError
+from azure.cli.core.commands.client_factory import get_subscription_id
+from knack.log import get_logger
+
+logger = get_logger(__name__)
+
+
+# pylint: disable=too-many-branches
+def aks_sreclaw_create(cmd,
+ client,
+ resource_group_name,
+ cluster_name,
+ namespace=None,
+ no_wait=False,
+ ):
+ """Initialize SREClaw helm deployment with LLM configuration and cluster role setup."""
+ subscription_id = get_subscription_id(cmd.cli_ctx)
+
+ if not namespace:
+ namespace = "kube-system"
+
+ kubeconfig_path = get_aks_credentials(
+ client,
+ resource_group_name,
+ cluster_name
+ )
+ console = get_console()
+
+ with CLITelemetryClient(event_type="create"):
+ try:
+ console.print(
+ "\nš Welcome to AKS SREClaw initialization!",
+ style=f"bold {HELP_COLOR}")
+ console.print(
+ "\nThis will set up the sreclaw deployment in your cluster.",
+ style=f"bold {HELP_COLOR}")
+
+ console.print(f"\nš¦ Using namespace: {namespace}", style=INFO_COLOR)
+ aks_sreclaw_manager = AKSSREClawManager(
+ resource_group_name=resource_group_name,
+ cluster_name=cluster_name,
+ namespace=namespace,
+ subscription_id=subscription_id,
+ kubeconfig_path=kubeconfig_path,
+ )
+
+ # ===== PHASE 1: LLM Configuration Setup =====
+ _setup_llm_configuration(console, aks_sreclaw_manager)
+
+ # ===== PHASE 2: Helm Deployment =====
+ _setup_helm_deployment(console, aks_sreclaw_manager, no_wait)
+
+ except Exception as e:
+ console.print(f"ā Error during creation: {str(e)}", style=ERROR_COLOR)
+ raise AzCLIError(f"SREClaw creation failed: {str(e)}")
+
+
+def _setup_llm_configuration(console, aks_sreclaw_manager: AKSSREClawManagerLLMConfigBase):
+ """Setup LLM configuration by checking existing config and prompting user.
+
+ Args:
+ console: Console instance for output
+ aks_sreclaw_manager: AKS sreclaw manager instance (AKSSREClawManagerLLMConfigBase)
+ """
+ # Check if LLM configuration exists by getting the model list
+ model_list = aks_sreclaw_manager.get_llm_config()
+
+ if model_list:
+ console.print(
+ "LLM configuration already exists.",
+ style=f"bold {HELP_COLOR}")
+
+ # Display existing LLM configurations
+ console.print("\nš Existing LLM Providers:", style=f"bold {HELP_COLOR}")
+ for provider_name, provider_config in model_list.items():
+ console.print(f" ⢠Provider: {provider_name}", style=INFO_COLOR)
+ if "models" in provider_config and provider_config["models"]:
+ console.print(f" Models: {', '.join(provider_config['models'])}", style="cyan")
+ if "api_base" in provider_config and provider_config["api_base"]:
+ console.print(f" API Base: {provider_config['api_base']}", style="cyan")
+
+        # TODO: allow the user to configure multiple LLM configs at one time?
+ user_input = console.input(
+ f"\n[{HELP_COLOR}]Do you want to add/update the LLM configuration? (y/N): [/]").strip().lower()
+ if user_input not in ['y', 'yes']:
+ console.print("Skipping LLM configuration update.", style=f"bold {HELP_COLOR}")
+ else:
+ _setup_and_create_llm_config(console, aks_sreclaw_manager)
+ else:
+ console.print("No existing LLM configuration found. Setting up new configuration...",
+ style=f"bold {HELP_COLOR}")
+ _setup_and_create_llm_config(console, aks_sreclaw_manager)
+
+
+def _setup_helm_deployment(console, aks_sreclaw_manager: AKSSREClawManager, no_wait: bool = False):
+ """Setup and deploy helm chart with service account configuration."""
+ console.print("\nš Phase 2: Helm Deployment", style=f"bold {HELP_COLOR}")
+
+ # Check current helm deployment status
+ agent_status = aks_sreclaw_manager.get_agent_status()
+ helm_status = agent_status.get("helm_status", "not_found")
+
+ if helm_status == "deployed":
+        console.print(f"✅ SREClaw helm chart is already deployed (status: {helm_status})", style=SUCCESS_COLOR)
+
+        # Display the existing service account from helm values; the service account itself is immutable.
+ service_account_name = aks_sreclaw_manager.sreclaw_service_account_name
+ console.print(
+ f"\nš¤ Current service account in namespace '{aks_sreclaw_manager.namespace}': {service_account_name}",
+ style="cyan")
+
+ elif helm_status == "not_found":
+ console.print(
+ f"Helm chart not deployed (status: {helm_status}). Setting up deployment...",
+ style=f"bold {HELP_COLOR}")
+
+ # Prompt for service account configuration
+ console.print("\nš¤ Service Account Configuration", style=f"bold {HELP_COLOR}")
+ console.print(
+ f"SREClaw requires a service account with appropriate Azure and Kubernetes permissions in the '{aks_sreclaw_manager.namespace}' namespace.",
+ style=INFO_COLOR)
+ console.print(
+ "Please ensure you have created the necessary Role and RoleBinding in your namespace for this service account.",
+ style=WARNING_COLOR)
+ console.print(
+ "To have access to Azure resources, the service account should be annotated with "
+ "'azure.workload.identity/client-id: '.",
+ style=WARNING_COLOR)
+
+ # Prompt user for service account name (required)
+ while True:
+ user_input = console.input(
+ f"\n[{HELP_COLOR}]Enter service account name: [/]").strip()
+ if user_input:
+ aks_sreclaw_manager.sreclaw_service_account_name = user_input
+                console.print(f"✅ Using service account: {user_input}", style=SUCCESS_COLOR)
+ break
+ console.print(
+ "Service account name cannot be empty. Please enter a valid service account name.", style=WARNING_COLOR)
+
+ else:
+ # Handle non-standard helm status (failed, pending-install, pending-upgrade, etc.)
+ cmd_flags = aks_sreclaw_manager.command_flags()
+ init_cmd_flags = aks_sreclaw_manager.command_flags()
+ console.print(
+ f"ā ļø Detected unexpected helm status: {helm_status}\n"
+ f"SREClaw deployment is in an unexpected state.\n\n"
+ f"To investigate, run: az aks claw --status {cmd_flags}\n"
+ f"To recover:\n"
+ f" 1. Clean up and recreate: az aks sreclaw delete {cmd_flags} && az aks claw create {init_cmd_flags}\n"
+ f" 2. Check deployment logs for more details",
+ style=HELP_COLOR)
+ raise AzCLIError(f"Cannot proceed with initialization due to unexpected helm status: {helm_status}")
+
+ # Deploy if configuration changed or helm charts not deployed
+ console.print("\nš Deploying SREClaw (this typically takes less than 2 minutes)...", style=INFO_COLOR)
+ success, error_msg = aks_sreclaw_manager.deploy_sreclaw(no_wait=no_wait)
+
+ if success:
+        console.print("✅ SREClaw deployed successfully!", style=SUCCESS_COLOR)
+ else:
+ console.print("ā Failed to deploy agent", style=ERROR_COLOR)
+ console.print(f"Error: {error_msg}", style=ERROR_COLOR)
+ cmd_flags = aks_sreclaw_manager.command_flags()
+ console.print(
+ f"Run 'az aks claw --status {cmd_flags}' to investigate the deployment issue.",
+ style=INFO_COLOR)
+ raise AzCLIError("Failed to deploy agent")
+
+ if no_wait:
+ console.print("\nš Deployment initiated successfully!", style=SUCCESS_COLOR)
+ cmd_flags = aks_sreclaw_manager.command_flags()
+ console.print(
+ f"You can check the status using 'az aks sreclaw --status {cmd_flags}'", style="cyan")
+ return
+
+ # Verify deployment is ready
+ console.print("Verifying deployment status...", style=INFO_COLOR)
+ agent_status = aks_sreclaw_manager.get_agent_status()
+ if agent_status.get("ready", False):
+        console.print("✅ SREClaw is ready and running!", style=SUCCESS_COLOR)
+ console.print("\nš Initialization completed successfully!", style=SUCCESS_COLOR)
+ else:
+ console.print(
+ "ā ļø SREClaw is deployed but not yet ready. It may take a few moments to start.",
+ style=WARNING_COLOR)
+ if helm_status not in ["deployed", "superseded"]:
+ cmd_flags = aks_sreclaw_manager.command_flags()
+ console.print(
+ f"You can check the status later using 'az aks sreclaw --status {cmd_flags}'", style="cyan")
+
+
+def _setup_and_create_llm_config(console, aks_sreclaw_manager: AKSSREClawManagerLLMConfigBase):
+ """Setup and create LLM configuration with user input.
+
+ Args:
+ console: Console instance for output
+ aks_sreclaw_manager: AKS sreclaw manager instance (AKSSREClawManagerLLMConfigBase)
+ """
+
+ # Prompt for LLM configuration
+ console.print("Please provide your LLM configuration. Type '/exit' to exit.", style=f"bold {HELP_COLOR}")
+
+ provider = prompt_provider_choice()
+ params = provider.prompt_params()
+
+ # Validate the connection
+ error, action = provider.validate_connection(params)
+
+ if error is None:
+        console.print("✅ LLM configuration validated successfully!", style=SUCCESS_COLOR)
+
+ try:
+ aks_sreclaw_manager.save_llm_config(provider, params)
+ console.print(
+                "✅ LLM configuration created/updated successfully in Kubernetes cluster!",
+ style=SUCCESS_COLOR)
+ except Exception as e:
+ console.print(f"ā Failed to save LLM configuration: {str(e)}", style=ERROR_COLOR)
+ raise AzCLIError(f"Failed to save LLM configuration: {str(e)}")
+
+ elif error is not None and action == "retry_input":
+ cmd_flags = aks_sreclaw_manager.init_command_flags()
+ raise AzCLIError(f"Please re-run `az aks claw create {cmd_flags}` to correct the input parameters. {error}")
+ else:
+ raise AzCLIError(f"Please check your deployed model and network connectivity. {error}")
+
+
+def aks_sreclaw_status(
+ cmd,
+ client,
+ resource_group_name,
+ cluster_name,
+ namespace,
+):
+ """Display the status of the SREClaw deployment."""
+
+ kubeconfig_path = get_aks_credentials(
+ client,
+ resource_group_name,
+ cluster_name
+ )
+ subscription_id = get_subscription_id(cmd.cli_ctx)
+
+ sreclaw_manager = AKSSREClawManager(
+ resource_group_name=resource_group_name,
+ cluster_name=cluster_name,
+ subscription_id=subscription_id,
+ namespace=namespace,
+ kubeconfig_path=kubeconfig_path
+ )
+
+ _aks_sreclaw_status(sreclaw_manager)
+
+
def _aks_sreclaw_status(sreclaw_manager: AKSSREClawManager):
    """Display the status of the SREClaw deployment.

    Renders, in order: helm release state, service account, deployments,
    pods, configured LLM providers, and an overall readiness summary.

    Args:
        sreclaw_manager: Manager already connected to the target cluster.
    """
    console = get_console()

    console.print("\nš Checking SREClaw status...", style=INFO_COLOR)
    agent_status = sreclaw_manager.get_agent_status()

    # Display helm status
    helm_status = agent_status.get("helm_status", "unknown")
    if helm_status == "deployed":
        console.print(f"\nā
 Helm Release: {helm_status}", style=SUCCESS_COLOR)
    elif helm_status == "not_found":
        console.print("\nā Helm Release: Not found", style=ERROR_COLOR)
        cmd_flags = sreclaw_manager.command_flags()
        # NOTE(review): this hint uses 'az aks sreclaw create' while the create
        # flow elsewhere references 'az aks claw create' — confirm the actual
        # command group name.
        console.print(
            f"SREClaw is not installed. Run 'az aks sreclaw create {cmd_flags}' to install.", style=INFO_COLOR)
        # No release means there is nothing else to report.
        return
    else:
        console.print(f"\nā ļø Helm Release: {helm_status}", style=WARNING_COLOR)

    # Display service account
    if sreclaw_manager.sreclaw_service_account_name:
        console.print(f"\nš¤ Service Account: {sreclaw_manager.sreclaw_service_account_name}", style="bold cyan")

    # Display deployment status
    deployments = agent_status.get("deployments", [])
    if deployments:
        console.print("\nš¦ Deployments:", style="bold cyan")
        for dep in deployments:
            ready_replicas = dep.get("ready_replicas", 0)
            replicas = dep.get("replicas", 0)
            # Green only when every desired replica is ready (and at least one exists).
            status_color = SUCCESS_COLOR if ready_replicas == replicas and replicas > 0 else WARNING_COLOR
            console.print(f" ⢠{dep['name']}: {ready_replicas}/{replicas} ready", style=status_color)

    # Display pod status
    pods = agent_status.get("pods", [])
    if pods:
        console.print("\nš³ Pods:", style="bold cyan")
        for pod in pods:
            pod_name = pod.get("name", "unknown")
            pod_phase = pod.get("phase", "unknown")
            pod_ready = pod.get("ready", False)

            # Running+ready is healthy; Running but not ready gets a warning.
            if pod_ready and pod_phase == "Running":
                console.print(f" ⢠{pod_name}: {pod_phase} ā", style=SUCCESS_COLOR)
            elif pod_phase == "Running":
                console.print(f" ⢠{pod_name}: {pod_phase} (not ready)", style=WARNING_COLOR)
            else:
                console.print(f" ⢠{pod_name}: {pod_phase}", style=WARNING_COLOR)

    # Display LLM configurations
    llm_configs = agent_status.get("llm_configs", [])
    if llm_configs:
        console.print("\nš LLM Providers:", style="bold cyan")
        for llm_config in llm_configs:
            provider_name = llm_config.get("provider", "unknown")
            console.print(f" ⢠Provider: {provider_name}", style=INFO_COLOR)
            if "models" in llm_config:
                models = llm_config["models"]
                if models:
                    console.print(f" Models: {', '.join(models)}", style="cyan")
            if "api_base" in llm_config:
                console.print(f" API Base: {llm_config['api_base']}", style="cyan")

    # Display overall status
    if agent_status.get("ready", False):
        console.print("\nā
 SREClaw is ready and running!", style=SUCCESS_COLOR)
    else:
        console.print("\nā ļø SREClaw is not fully ready", style=WARNING_COLOR)
+
+
def aks_sreclaw_delete(
    cmd,
    client,
    resource_group_name,
    cluster_name,
    namespace,
    no_wait=False,
    yes=False,
):
    """Delete and uninstall SREClaw.

    Args:
        cmd: Azure CLI command context (provides cli_ctx).
        client: Managed clusters client used to fetch cluster credentials.
        resource_group_name: Resource group containing the AKS cluster.
        cluster_name: Name of the AKS cluster.
        namespace: Namespace where SREClaw is installed.
        no_wait: When True, start the uninstall without waiting for completion.
        yes: When True, skip the interactive confirmation prompt.
    """
    with CLITelemetryClient(event_type="delete"):
        console = get_console()

        console.print(
            "\nā ļø Warning: This will uninstall SREClaw and delete all associated resources.",
            style=WARNING_COLOR)

        # Interactive confirmation; anything other than y/yes aborts the delete.
        if not yes:
            user_confirmation = console.input(
                f"\n[{WARNING_COLOR}]Are you sure you want to proceed with delete? (y/N): [/]").strip().lower()

            if user_confirmation not in ['y', 'yes']:
                console.print("ā Delete cancelled.", style=INFO_COLOR)
                return

        console.print("\nšļø Starting delete (this typically takes a few seconds)...", style=INFO_COLOR)

        kubeconfig = get_aks_credentials(
            client,
            resource_group_name,
            cluster_name
        )
        subscription_id = get_subscription_id(cmd.cli_ctx)

        aks_sreclaw_manager = AKSSREClawManager(
            resource_group_name=resource_group_name,
            cluster_name=cluster_name,
            subscription_id=subscription_id,
            namespace=namespace,
            kubeconfig_path=kubeconfig
        )

        success = aks_sreclaw_manager.uninstall_sreclaw(no_wait=no_wait)

        if success:
            if no_wait:
                console.print("ā
 Delete initiated successfully!", style=SUCCESS_COLOR)
                cmd_flags = aks_sreclaw_manager.command_flags()
                # NOTE(review): 'az aks sreclaw --status' reads as a flag,
                # but status appears to be a subcommand elsewhere — confirm.
                console.print(
                    f"You can check the status using 'az aks sreclaw --status {cmd_flags}'", style="cyan")
            else:
                console.print("ā
 Delete completed successfully! All resources have been removed.", style=SUCCESS_COLOR)
        else:
            cmd_flags = aks_sreclaw_manager.command_flags()
            console.print(
                f"ā Delete failed. Please run 'az aks sreclaw --status {cmd_flags}' to verify delete completion.", style=ERROR_COLOR)
+
+
def aks_sreclaw_connect(
    cmd,
    client,
    resource_group_name,
    cluster_name,
    namespace,
    local_port=18789,
):
    """Port-forward to aks-sreclaw service.

    Prints the gateway token, then blocks forwarding localhost:<local_port>
    to the SREClaw service pod until stopped with Ctrl+C.

    Args:
        cmd: Azure CLI command context (provides cli_ctx).
        client: Managed clusters client used to fetch cluster credentials.
        resource_group_name: Resource group containing the AKS cluster.
        cluster_name: Name of the AKS cluster.
        namespace: Namespace where SREClaw is installed (required).
        local_port: Local TCP port to bind for the forward (default 18789).

    Raises:
        AzCLIError: When namespace is missing or the connection fails.
    """
    console = get_console()

    with CLITelemetryClient(event_type="connect"):
        try:
            if not namespace:
                raise AzCLIError("--namespace is required.")

            console.print("\nš Connecting to aks-sreclaw service...", style=INFO_COLOR)

            kubeconfig_path = get_aks_credentials(
                client,
                resource_group_name,
                cluster_name
            )
            subscription_id = get_subscription_id(cmd.cli_ctx)

            aks_sreclaw_manager = AKSSREClawManager(
                resource_group_name=resource_group_name,
                cluster_name=cluster_name,
                subscription_id=subscription_id,
                namespace=namespace,
                kubeconfig_path=kubeconfig_path
            )

            # Get token and pod info before port-forwarding
            gateway_token, pod_name, target_port = aks_sreclaw_manager.port_forward_to_service(local_port)

            console.print("\n" + "=" * 80, style=SUCCESS_COLOR)
            console.print("š Gateway Token", style=f"bold {SUCCESS_COLOR}")
            console.print("=" * 80, style=SUCCESS_COLOR)
            console.print(f"{gateway_token}", style="bold cyan")
            console.print("=" * 80 + "\n", style=SUCCESS_COLOR)

            console.print(
                f"š Port-forwarding: localhost:{local_port} -> {aks_sreclaw_manager.chart_name}:{target_port}", style=INFO_COLOR)
            console.print(f"š Open your browser and navigate to: http://localhost:{local_port}", style=INFO_COLOR)
            console.print("Press Ctrl+C to stop\n", style="dim")

            # Start blocking port-forward
            aks_sreclaw_manager.start_port_forward(pod_name, target_port, local_port)
            console.print("\nš Stopped", style=WARNING_COLOR)

        except KeyboardInterrupt:
            # Ctrl+C during the blocking forward is the normal way to stop.
            console.print("\nš Stopped", style=WARNING_COLOR)
        except AzCLIError:
            # Bug fix: the broad handler below used to re-wrap our own
            # AzCLIError (e.g. the missing --namespace message) into a
            # generic "connect failed" message; propagate it unchanged.
            raise
        except Exception as e:
            # Chain the original exception so the root cause stays visible
            # in --debug output.
            raise AzCLIError(f"SREClaw connect failed: {str(e)}") from e
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/__init__.py b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/aks.py b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/aks.py
new file mode 100644
index 00000000000..9d7ffccf37d
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/aks.py
@@ -0,0 +1,143 @@
+
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+import errno
+import os
+import subprocess
+
+import yaml
+from azure.cli.core.azclierror import AzCLIError
+from knack.log import get_logger
+from knack.util import CLIError
+
+logger = get_logger(__name__)
+
+# NOTE(mainred): we can use get_default_cli().invoke() to trigger `az aks get-credentials` to fetch the kubeconfig,
+# but this command shows redundant warning log like
+# "The behavior of this command has been altered by the following extension: aks-preview" when aks-preview is installed
+# "Merged "" as current context in /" when the kubeconfig file already exists
+# and `--only-show-errors` does not suppress it for the global log handler has been initialized before invoking the
+# command, in the "az aks agent" commands. Resetting the log level for get-credentials will break the log behavior of
+# `az aks agent`. So we directly use the SDK to get the kubeconfig here, which makes sense for an aks extension.
+
+
def get_aks_credentials(
    client: str,
    resource_group_name: str,
    cluster_name: str,
    admin: bool = False,
    user="clusterUser",
) -> str:
    """Fetch the AKS cluster kubeconfig and write it to a local file.

    Args:
        client: Managed clusters operations client used to list credentials.
        resource_group_name: Resource group containing the AKS cluster.
        cluster_name: Name of the AKS cluster.
        admin: When True, fetch cluster-admin credentials.
        user: Credential type, 'clusterUser' or 'clusterMonitoringUser'
            (ignored when admin is True).

    Returns:
        Path of the kubeconfig file written to disk.

    Raises:
        AzCLIError: For an invalid user type, or when kubelogin is required
            but not installed.
        CLIError: When no credentials or kubeconfig can be retrieved.
    """
    import shutil

    if admin:
        credential_results = client.list_cluster_admin_credentials(
            resource_group_name, cluster_name)
    elif user.lower() == 'clusteruser':
        credential_results = client.list_cluster_user_credentials(
            resource_group_name, cluster_name)
    elif user.lower() == 'clustermonitoringuser':
        credential_results = client.list_cluster_monitoring_user_credentials(
            resource_group_name, cluster_name)
    else:
        raise AzCLIError("invalid user type for get credentials: {}".format(user))

    if not credential_results:
        raise CLIError("No Kubernetes credentials found.")

    # Bug fix: guard the kubeconfig extraction itself. An empty or malformed
    # credential payload raises IndexError/ValueError HERE; the original try
    # block started after this access, so the guard never applied.
    try:
        kubeconfig = credential_results.kubeconfigs[0].value.decode(encoding='UTF-8')
    except (IndexError, ValueError) as exc:
        raise CLIError("Fail to find kubeconfig file.") from exc

    kubeconfig_path = _get_kubeconfig_file_path(resource_group_name, cluster_name)

    # 0o600: the kubeconfig contains credentials — keep it owner-only.
    with os.fdopen(os.open(kubeconfig_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600), 'wt') as f:
        f.write(kubeconfig)

    # If the kubeconfig requires kubelogin devicecode auth, convert it to use
    # the Azure CLI login so the user is not prompted interactively.
    if _uses_kubelogin_devicecode(kubeconfig):
        if shutil.which("kubelogin"):
            try:
                # Run kubelogin convert-kubeconfig -l azurecli
                subprocess.run(
                    ["kubelogin", "convert-kubeconfig", "-l", "azurecli", "--kubeconfig", kubeconfig_path],
                    check=True,
                )
                logger.info("Converted kubeconfig to use Azure CLI authentication.")
            except subprocess.CalledProcessError as e:
                logger.warning("Failed to convert kubeconfig with kubelogin: %s", str(e))
            except Exception as e:  # pylint: disable=broad-except
                logger.warning("Error running kubelogin: %s", str(e))
        else:
            raise AzCLIError(
                "The kubeconfig uses devicecode authentication which requires kubelogin. "
                "Please install kubelogin from https://github.com/Azure/kubelogin or run "
                "'az aks install-cli' to install both kubectl and kubelogin. "
                "If devicecode login fails, try running "
                "'kubelogin convert-kubeconfig -l azurecli' to unblock yourself."
            )

    logger.info("Kubeconfig downloaded successfully to: %s", kubeconfig_path)
    return kubeconfig_path
+
+
def _uses_kubelogin_devicecode(kubeconfig: str) -> bool:
    """Return True when the kubeconfig's first user authenticates through
    'kubelogin' with the devicecode login mode ('--login devicecode' or
    '-l devicecode')."""
    try:
        parsed = yaml.safe_load(kubeconfig)

        users = (parsed or {}).get('users') or []
        if not users:
            return False

        # Only the first user entry is inspected, matching how the
        # credential payload is written.
        exec_section = users[0].get('user', {}).get('exec', {})

        if 'kubelogin' not in exec_section.get('command', ''):
            return False

        # Join args so both '--login devicecode' and '-l devicecode'
        # (two separate list entries) are detected with one scan.
        joined_args = ' '.join(exec_section.get('args', []))
        return '--login devicecode' in joined_args or '-l devicecode' in joined_args
    except (yaml.YAMLError, KeyError, TypeError, AttributeError) as e:
        # A kubeconfig we cannot parse is treated as not requiring kubelogin.
        logger.debug("Error parsing kubeconfig: %s", str(e))
        return False
+
+
+def _get_kubeconfig_file_path( # pylint: disable=unused-argument
+ resource_group_name: str,
+ cluster_name: str,
+ subscription_id: str = None
+):
+ """Get the path to the kubeconfig file for the AKS cluster."""
+
+ home_dir = os.path.expanduser("~")
+ kubeconfig_dir = os.path.join(home_dir, ".aks-agent", "kube")
+
+ # ensure that kube folder exists
+ if kubeconfig_dir and not os.path.exists(kubeconfig_dir):
+ try:
+ os.makedirs(kubeconfig_dir)
+ except OSError as ex:
+ if ex.errno != errno.EEXIST:
+ raise
+
+ kubeconfig_filename = f"kubeconfig-{resource_group_name}-{cluster_name}"
+ kubeconfig_path = os.path.join(kubeconfig_dir, kubeconfig_filename)
+
+ return kubeconfig_path
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/console.py b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/console.py
new file mode 100644
index 00000000000..f8b2068a43b
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/console.py
@@ -0,0 +1,39 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+"""
+Console utilities for the AKS SREClaw CLI extension.
+Provides a singleton Rich Console instance and color constants for consistent terminal output.
+"""
+
+from rich.console import Console
+
+# Color constants for terminal output
+HELP_COLOR = "cyan" # Informational messages, help text
+SUCCESS_COLOR = "bold green" # Success messages
+WARNING_COLOR = "bold yellow" # Warning messages
+ERROR_COLOR = "bold red" # Error messages
+INFO_COLOR = "yellow" # General information
+HINT_COLOR = "bright_black" # Hints for user input
+DEFAULT_VALUE_COLOR = "bright_black" # Default value displays
+
+# Global singleton console instance
+_console_instance = None
+
+
def get_console() -> Console:
    """
    Return the process-wide shared Rich Console.

    The Console is created lazily on first use and reused afterwards so
    every part of the extension shares one formatting/output context.

    Returns:
        Console: The shared Rich Console instance
    """
    global _console_instance  # pylint: disable=global-statement
    console = _console_instance
    if console is None:
        console = Console()
        _console_instance = console
    return console
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/k8s/__init__.py b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/k8s/__init__.py
new file mode 100644
index 00000000000..cb8e0e4f740
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/k8s/__init__.py
@@ -0,0 +1,24 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+"""
+Kubernetes managers package for AKS SREClaw.
+
+This package contains specialized manager classes for different Kubernetes operations:
+- HelmManager: OS-agnostic helm binary management and operations
+- AKSSREClawManager: AKS SREClaw deployment, upgrading, and lifecycle management
+- exec_command_in_pod: Standalone function for pod command execution
+"""
+
+from .aks_sreclaw_manager import AKSSREClawManager
+from .helm_manager import HelmManager, create_helm_manager
+from .pod_exec import exec_command_in_pod
+
+__all__ = [
+ "HelmManager",
+ "AKSSREClawManager",
+ "exec_command_in_pod",
+ "create_helm_manager",
+]
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/k8s/aks_sreclaw_manager.py b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/k8s/aks_sreclaw_manager.py
new file mode 100644
index 00000000000..12b074ee18c
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/k8s/aks_sreclaw_manager.py
@@ -0,0 +1,1167 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+import base64
+import json
+import os
+import tempfile
+from abc import ABC, abstractmethod
+from typing import Dict, List, Optional, Tuple, Union
+
+from azext_aks_sreclaw._consts import (
+ AGENT_NAMESPACE,
+ AKS_SRECLAW_LABEL_SELECTOR,
+)
+from azext_aks_sreclaw.sreclaw.k8s.helm_manager import HelmManager
+from azext_aks_sreclaw.sreclaw.llm_config_manager import LLMConfigManager
+from azext_aks_sreclaw.sreclaw.llm_providers import LLMProvider
+from azure.cli.core.azclierror import AzCLIError
+from knack.log import get_logger
+from kubernetes import client, config
+from kubernetes.client.rest import ApiException
+
+from .pod_exec import exec_command_in_pod
+
+logger = get_logger(__name__)
+
+
class AKSSREClawManagerLLMConfigBase(ABC):
    """Abstract base class for SREClaw managers that support LLM configuration.

    Concrete implementations (e.g. AKSSREClawManager) provide the storage and
    execution backend; this interface keeps the CLI command layer
    implementation-agnostic.
    """

    @abstractmethod
    def get_llm_config(self) -> Dict:
        """
        Get LLM configuration.

        Returns:
            Dictionary of model configurations if exists, empty dict otherwise
        """

    @abstractmethod
    def save_llm_config(self, provider: LLMProvider, params: dict) -> None:
        """
        Save LLM configuration.

        Args:
            provider: LLM provider instance
            params: Dictionary of model parameters
        """

    @abstractmethod
    def exec_aks_sreclaw(self, command_flags: str = "") -> bool:
        """
        Execute SREClaw command.

        Args:
            command_flags: Additional flags for the aks-sreclaw command

        Returns:
            True if execution was successful

        Raises:
            AzCLIError: If execution fails
        """

    @abstractmethod
    def command_flags(self) -> str:
        """
        Get command flags for general aks-sreclaw commands.

        Returns:
            str: Command flags string appropriate for the concrete implementation.
        """
+
+
+class AKSSREClawManager(AKSSREClawManagerLLMConfigBase): # pylint: disable=too-many-instance-attributes
+ """
+ SREClaw Manager for deploying and recycling SREClaw helm charts.
+
+ This class provides functionality to:
+ - Deploy SREClaw using helm charts
+ - Upgrade existing deployments
+ - Recycle (restart/refresh) agent pods
+ - Monitor deployment status
+ - Clean up resources
+ """
+
    def __init__(self, resource_group_name: str, cluster_name: str,
                 subscription_id: str, namespace: str = AGENT_NAMESPACE,
                 kubeconfig_path: Optional[str] = None,
                 helm_manager: Optional[HelmManager] = None):
        """
        Initialize the SREClaw Manager.

        Args:
            resource_group_name: Azure resource group name for AKS cluster
            cluster_name: AKS cluster name
            subscription_id: Azure subscription ID
            namespace: Kubernetes namespace for SREClaw (defaults to AGENT_NAMESPACE)
            kubeconfig_path: Path to kubeconfig file (default: None - use default config)
            helm_manager: HelmManager instance (default: None - create new one)

        Raises:
            AzCLIError: If loading config from an existing Helm release fails.
        """
        self.namespace = namespace
        self.kubeconfig_path = kubeconfig_path
        # Fixed helm release/chart identity for the SREClaw deployment.
        self.helm_release_name = "aks-sreclaw"
        self.chart_name = "aks-sreclaw"

        # Kubernetes secret names holding LLM API keys and the gateway token.
        self.llm_secret_name = "sreclaw-llm-config-secrets"
        self.gateway_secret_name = "sreclaw-gateway-token"

        # AKS context - initialized via constructor
        self.resource_group_name: str = resource_group_name
        self.cluster_name: str = cluster_name
        self.subscription_id: str = subscription_id

        # OCI chart source and pinned default version.
        self.chart_repo = "oci://docker.io/mainred/aks-sreclaw"
        self.chart_version = "0.1.0"

        # Populated from Helm values below when a release already exists.
        self.sreclaw_service_account_name: str = ""
        self.llm_config_manager = LLMConfigManager()

        # Initialize Kubernetes client
        self._init_k8s_client()
        # Use provided helm manager or create a new one with kubeconfig
        self.helm_manager = helm_manager or HelmManager(kubeconfig_path=self.kubeconfig_path)

        # Hydrate state (LLM config, service account) from any deployed release.
        self._load_existing_helm_release_config()
+
+ def _init_k8s_client(self):
+ """Initialize Kubernetes client configuration."""
+ try:
+ if self.kubeconfig_path:
+ config.load_kube_config(config_file=self.kubeconfig_path)
+ else:
+ config.load_kube_config()
+
+ self.k8s_client = client.ApiClient()
+ self.apps_v1 = client.AppsV1Api()
+ self.core_v1 = client.CoreV1Api()
+ self.rbac_v1 = client.RbacAuthorizationV1Api()
+ logger.debug("Kubernetes client initialized successfully")
+
+ except Exception as e:
+ logger.error("Failed to initialize Kubernetes client: %s", e)
+ raise
+
+ def _load_existing_helm_release_config(self):
+ """
+ Load configuration from Helm chart values.
+
+ Returns:
+ Dictionary containing the configuration from Helm values
+ """
+ try:
+ # Get helm values for the deployed chart
+ success, output = self._run_helm_command([
+ "get", "values", self.helm_release_name,
+ "--namespace", self.namespace,
+ "--output", "json"
+ ], check=False)
+
+ # Check if release not found
+ if output == "RELEASE_NOT_FOUND":
+ logger.debug("Helm release '%s' not found, initializing with empty model_list",
+ self.helm_release_name)
+ self.llm_config_manager.model_list = {}
+ return
+ if not success:
+ logger.error("Failed to get Helm values: %s", output)
+ raise AzCLIError(f"Failed to get Helm values: {output}")
+
+ try:
+ helm_values = json.loads(output)
+
+ # Parse new helm values structure: openclaw.llm.providers
+ openclaw_config = helm_values.get("openclaw", {})
+ llm_config = openclaw_config.get("llm", {})
+ providers = llm_config.get("providers", [])
+
+ # Convert providers list to model_list dict format for internal use
+ model_list = {}
+ for provider in providers:
+ provider_name = provider.get("name")
+ if provider_name:
+ model_list[provider_name] = {
+ "models": provider.get("models", []),
+ "api_base": provider.get("apiBase"),
+ }
+
+ self.llm_config_manager.model_list = model_list
+ if not model_list:
+ logger.warning("No providers found in Helm values")
+ else:
+ logger.debug("LLM configuration loaded from Helm values: %d providers found", len(model_list))
+
+ # Read API keys from Kubernetes secret and populate model_list
+ self._populate_api_keys_from_secret()
+
+ # Load service account name from helm values
+ service_account_config = helm_values.get("serviceAccount", {})
+ service_account_name = service_account_config.get("name", "")
+ if service_account_name:
+ self.sreclaw_service_account_name = service_account_name
+ logger.debug("Service account name loaded from Helm values: %s", service_account_name)
+ else:
+ logger.warning("No service account name found in Helm values")
+
+ except json.JSONDecodeError as e:
+ logger.error("Failed to parse Helm values JSON: %s", e)
+ raise AzCLIError(f"Failed to parse Helm values JSON: {e}")
+
+ except Exception as e: # pylint: disable=broad-exception-caught
+ logger.error("Failed to load LLM config from Helm values: %s", e)
+ raise AzCLIError(f"Failed to load LLM config from Helm values: {e}")
+
+ def _populate_api_keys_from_secret(self):
+ """
+ Read API keys from Kubernetes secret and populate them into model_list.
+
+ The secret key format is '{provider_name}-key' (e.g., 'azure-openai-key').
+ This method reads the actual API keys from the Kubernetes secret and
+ populates them into the provider configuration.
+ """
+ try:
+ # Try to read the secret
+ secret = self.core_v1.read_namespaced_secret(
+ name=self.llm_secret_name,
+ namespace=self.namespace
+ )
+
+ if not secret.data:
+ logger.warning("Secret '%s' exists but has no data", self.llm_secret_name)
+ return
+
+ # Decode secret data (base64 encoded)
+ secret_data = {}
+ for key, value in secret.data.items():
+ decoded_value = base64.b64decode(value).decode("utf-8")
+ secret_data[key] = decoded_value
+
+ logger.debug("Read %d API keys from secret '%s'", len(secret_data), self.llm_secret_name)
+
+ # Populate API keys into model_list
+ for provider_name, provider_config in self.llm_config_manager.model_list.items():
+ # The secret key is '{provider_name}-key'
+ secret_key = f"{provider_name}-key"
+
+ # If the secret contains this key, populate it
+ if secret_key in secret_data:
+ provider_config["api_key"] = secret_data[secret_key]
+ logger.debug("Populated API key for provider '%s' from secret key '%s'",
+ provider_name, secret_key)
+ else:
+ logger.warning("API key not found for provider '%s' (expected secret key: '%s')",
+ provider_name, secret_key)
+
+ except ApiException as e:
+ if e.status == 404:
+ logger.debug("Secret '%s' not found in namespace '%s', skipping API key population",
+ self.llm_secret_name, self.namespace)
+ else:
+ logger.warning("Failed to read secret '%s': %s", self.llm_secret_name, e)
+ except Exception as e: # pylint: disable=broad-exception-caught
+ logger.warning("Unexpected error reading API keys from secret: %s", e)
+
    def get_sreclaw_pods(self) -> Tuple[bool, Union[List[str], str]]:
        """
        Get running SREClaw pods from the Kubernetes cluster.

        This function searches for pods with the label selector
        AKS_SRECLAW_LABEL_SELECTOR in the namespace and returns information
        about their status.

        Note:
            This function will log warning messages if some pods are not
            running but at least one pod is available. Check the logs for
            complete status information.

        Returns:
            Tuple[bool, Union[List[str], str]]:
                - First element: True if running pods found, False if error occurred
                - Second element: List of running pod names if successful,
                  detailed error message if failed
        """
        try:
            # List pods with label selector
            logger.debug("Searching for pods with label selector '%s' in namespace '%s'",
                         AKS_SRECLAW_LABEL_SELECTOR, self.namespace)

            # Get pods with label selector
            pod_list = self.core_v1.list_namespaced_pod(
                namespace=self.namespace,
                label_selector=AKS_SRECLAW_LABEL_SELECTOR
            )

            # No pods at all: surface the likely causes to the caller.
            if not pod_list.items:
                error_msg = (
                    f"No pods found with label selector '{AKS_SRECLAW_LABEL_SELECTOR}' "
                    f"in namespace '{self.namespace}'. "
                    f"This could mean:\n"
                    f" 1. SREClaw is not deployed in the cluster\n"
                    f" 2. The namespace '{self.namespace}' does not exist\n"
                    f" 3. The pods have different labels than expected\n"
                    f" 4. You may not have sufficient permissions to list pods in this namespace"
                )
                logger.error(error_msg)
                return False, error_msg

            # Categorize pods by status
            running_pods = []
            pending_pods = []
            failed_pods = []
            other_pods = []

            for pod in pod_list.items:
                pod_name = pod.metadata.name
                pod_phase = pod.status.phase

                if pod_phase == 'Running':
                    running_pods.append(pod_name)
                elif pod_phase == 'Pending':
                    pending_pods.append(pod_name)
                elif pod_phase == 'Failed':
                    failed_pods.append(pod_name)
                else:
                    other_pods.append(f"{pod_name} ({pod_phase})")

            # Log pod status summary
            logger.debug("Found %d total pods: %d running, %d pending, %d failed, %d other",
                         len(pod_list.items), len(running_pods), len(pending_pods),
                         len(failed_pods), len(other_pods))

            # Return running pods if any are available
            if running_pods:
                logger.debug("Available running pods: %s", ', '.join(running_pods))

                # Warn about any non-running pods
                warning_details = []
                if pending_pods:
                    warning_details.append(f"{len(pending_pods)} pending pod(s): {', '.join(pending_pods)}")
                if failed_pods:
                    warning_details.append(f"{len(failed_pods)} failed pod(s): {', '.join(failed_pods)}")
                if other_pods:
                    warning_details.append(f"{len(other_pods)} pod(s) in other states: {', '.join(other_pods)}")

                if warning_details:
                    warning_summary = "; ".join(warning_details)
                    logger.warning(
                        "Found %d running SREClaw pod(s), but some pods are not running: %s. "
                        "These pods may need attention.",
                        len(running_pods), warning_summary
                    )

                return True, running_pods

            # No running pods found - provide detailed error message
            status_details = []
            if pending_pods:
                status_details.append(f"{len(pending_pods)} pending pod(s): {', '.join(pending_pods)}")
            if failed_pods:
                status_details.append(f"{len(failed_pods)} failed pod(s): {', '.join(failed_pods)}")
            if other_pods:
                status_details.append(f"{len(other_pods)} pod(s) in other states: {', '.join(other_pods)}")

            status_summary = "; ".join(status_details) if status_details else "all pods are in unknown state"

            error_msg = (
                f"No running pods found with label selector '{AKS_SRECLAW_LABEL_SELECTOR}' "
                f"in namespace '{self.namespace}'. "
                f"Found {len(pod_list.items)} pod(s) but none are in Running state: {status_summary}. "
                f"SREClaw pods may be starting up, failing to start, or experiencing issues."
            )
            logger.error(error_msg)
            return False, error_msg

        except ApiException as e:
            # Map the common API failures (RBAC denial, missing namespace) to
            # actionable messages; everything else keeps the raw API error.
            if e.status == 403:
                error_msg = (
                    f"Access denied when trying to list pods in namespace '{self.namespace}'. "
                    f"You may not have sufficient RBAC permissions. "
                    f"Error details: {e}"
                )
            elif e.status == 404:
                error_msg = (
                    f"Namespace '{self.namespace}' not found. "
                    f"The SREClaw namespace may not exist in this cluster. "
                    f"Error details: {e}"
                )
            else:
                error_msg = f"Kubernetes API error when listing pods: {e}"

            logger.error(error_msg)
            return False, error_msg
        except Exception as e:  # pylint: disable=broad-exception-caught
            error_msg = f"Unexpected error while searching for SREClaw pods: {e}"
            logger.error(error_msg)
            return False, error_msg
+
+ def _run_helm_command(self, args: List[str], check: bool = True) -> tuple[bool, str]:
+ """
+ Execute a helm command using the helm manager.
+
+ Args:
+ args: List of helm command arguments
+ check: Whether to raise exception on non-zero exit code
+
+ Returns:
+ Tuple of (success, output)
+ """
+ return self.helm_manager.run_command(args, check=check)
+
+ def command_flags(self) -> str:
+ """
+ Get command flags for CLI commands.
+
+ Returns:
+ str: Command flags in format '-n {cluster_name} -g {resource_group_name} --namespace {namespace}'
+ """
+ return f"-n {self.cluster_name} -g {self.resource_group_name} --namespace {self.namespace}"
+
+ def _wait_for_pods_removed(self, timeout: int = 60, interval: int = 2) -> bool:
+ """
+ Wait for all SREClaw pods to be removed from the namespace.
+
+ Args:
+ timeout: Maximum time to wait in seconds (default: 60)
+ interval: Time to wait between checks in seconds (default: 2)
+
+ Returns:
+ bool: True if all pods are removed within timeout, False otherwise
+ """
+ import time
+
+ logger.info("Waiting for pods to be removed from namespace '%s'", self.namespace)
+ start_time = time.time()
+
+ while time.time() - start_time < timeout:
+ try:
+ # Check for pods with label selector
+ pod_list = self.core_v1.list_namespaced_pod(
+ namespace=self.namespace,
+ label_selector=AKS_SRECLAW_LABEL_SELECTOR
+ )
+
+ total_pods = len(pod_list.items)
+
+ if total_pods == 0:
+ logger.info("All pods removed successfully")
+ return True
+
+ logger.debug("Still %d pod(s) remaining, waiting...", total_pods)
+ time.sleep(interval)
+
+ except ApiException as e:
+ if e.status == 404:
+ # Namespace might have been deleted, consider this as success
+ logger.info("Namespace not found, pods are considered removed")
+ return True
+ logger.warning("Error checking pod status: %s", e)
+ time.sleep(interval)
+ except Exception as e: # pylint: disable=broad-exception-caught
+ logger.warning("Unexpected error checking pod status: %s", e)
+ time.sleep(interval)
+
+ logger.warning("Timeout waiting for pods to be removed")
+ return False
+
+ def _wait_for_pods_ready(self, timeout: int = 300, interval: int = 5) -> bool:
+ """
+ Wait for SREClaw pods to be ready.
+
+ Args:
+ timeout: Maximum time to wait in seconds (default: 300 = 5 minutes)
+ interval: Time to wait between checks in seconds (default: 5)
+
+ Returns:
+ bool: True if pods are ready within timeout, False otherwise
+ """
+ import time
+
+ logger.info("Waiting for SREClaw pods to be ready in namespace '%s'", self.namespace)
+ start_time = time.time()
+
+ while time.time() - start_time < timeout:
+ try:
+ pod_list = self.core_v1.list_namespaced_pod(
+ namespace=self.namespace,
+ label_selector=AKS_SRECLAW_LABEL_SELECTOR
+ )
+
+ if not pod_list.items:
+ logger.debug("No pods found yet, waiting...")
+ time.sleep(interval)
+ continue
+
+ all_ready = self._check_all_pods_ready(pod_list.items)
+ if all_ready:
+ logger.info("All SREClaw pods are ready")
+ return True
+
+ time.sleep(interval)
+
+ except ApiException as e:
+ logger.warning("Error checking pod readiness: %s", e)
+ time.sleep(interval)
+ except Exception as e: # pylint: disable=broad-exception-caught
+ logger.warning("Unexpected error checking pod readiness: %s", e)
+ time.sleep(interval)
+
+ logger.warning("Timeout waiting for SREClaw pods to be ready")
+ return False
+
+ def _check_all_pods_ready(self, pods) -> bool:
+ """Check if all pods are ready."""
+ for pod in pods:
+ pod_name = pod.metadata.name
+ pod_phase = pod.status.phase
+
+ if pod_phase != "Running":
+ logger.debug("Pod %s is in phase %s, waiting...", pod_name, pod_phase)
+ return False
+
+ if not self._is_pod_ready(pod):
+ logger.debug("Pod %s is not ready yet, waiting...", pod_name)
+ return False
+
+ return True
+
+ def _is_pod_ready(self, pod) -> bool:
+ """Check if a pod is ready."""
+ if pod.status.conditions:
+ for condition in pod.status.conditions:
+ if condition.type == "Ready" and condition.status == "True":
+ return True
+ return False
+
    def deploy_sreclaw(self, chart_version: Optional[str] = None, no_wait: bool = False) -> Tuple[bool, str]:
        """
        Deploy SREClaw using helm chart.

        Uses `helm upgrade --install`, so the same call both installs a fresh
        release and upgrades an existing one. Chart values are rendered into a
        temporary YAML file which is removed again after the helm run.

        Args:
            chart_version: Specific chart version to deploy (default: latest)
            no_wait: Do not wait for the long-running operation to finish (default: False)

        Returns:
            Tuple[bool, str]: (success, error_message)
            - success: True if deployment was successful, False otherwise
            - error_message: Error message if deployment failed, empty string if successful
        """
        logger.info("Deploying/Upgrading SREClaw to namespace '%s'", self.namespace)

        # Prepare helm install command (`--install` makes the upgrade idempotent)
        helm_args = [
            "upgrade", self.helm_release_name, self.chart_repo,
            "--namespace", self.namespace,
            "--install",
            "--timeout", "2m"
        ]

        # Add --wait flag only if no_wait is False
        if not no_wait:
            helm_args.append("--wait")

        # Add chart version if specified (prefer parameter, fallback to instance variable)
        version_to_use = chart_version or self.chart_version
        if version_to_use:
            helm_args.extend(["--version", version_to_use])

        # Render the Helm values for this deployment (always produced,
        # even when no LLM configuration is present).
        values = self._create_helm_values()

        # Create temporary file in a cross-platform way
        values_file = None
        try:
            import yaml

            # delete=False is required on Windows, where the open handle would
            # otherwise prevent helm from reading the file; we remove it
            # ourselves after the helm run.
            with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
                values_file = f.name
                yaml.dump(values, f)
            helm_args.extend(["--values", values_file])
        except Exception as e:  # pylint: disable=broad-exception-caught
            # Best-effort: deploy proceeds without custom values rather than failing.
            logger.warning("Failed to write values file: %s", e)

        # Remove empty strings from args
        helm_args = [arg for arg in helm_args if arg]

        # Execute helm install
        success, output = self._run_helm_command(helm_args)

        # Clean up temporary values file
        if values_file:
            try:
                if os.path.exists(values_file):
                    os.remove(values_file)
                    logger.debug("Removed temporary values file: %s", values_file)
            except Exception as e:  # pylint: disable=broad-exception-caught
                logger.debug("Failed to remove temporary values file: %s", e)

        if success:
            logger.info("SREClaw deployed/upgraded successfully")

            # Wait for pod readiness if no_wait is False
            if not no_wait:
                logger.info("Waiting for SREClaw pods to be ready...")
                if not self._wait_for_pods_ready(timeout=300):  # up to 5 more minutes of readiness polling after helm returns
                    return False, "Timeout waiting for SREClaw pods to be ready"

            return True, ""

        return False, output
+
    def get_agent_status(self) -> Dict:  # pylint: disable=too-many-locals
        """
        Get the current status of SREClaw deployment.

        Aggregates helm release state, deployment replica counts, per-pod
        readiness, and the locally known LLM provider configuration into a
        single dictionary. Errors are captured under the "error" key instead
        of being raised, so callers always get a status payload.

        Returns:
            Dictionary containing status information with keys:
            namespace, helm_release, helm_status, deployments, pods,
            ready (bool), llm_configs, and optionally error.
        """
        status = {
            "namespace": self.namespace,
            "helm_release": self.helm_release_name,
            "deployments": [],
            "pods": [],
            "ready": False,
            "llm_configs": []
        }

        try:
            # First, check if helm release exists (cheap `helm list` with a
            # name filter, parsed as JSON).
            list_success, list_output = self._run_helm_command([
                "list",
                "--namespace", self.namespace,
                "--filter", self.helm_release_name,
                "--output", "json"
            ], check=False)

            release_exists = False
            if list_success:
                try:
                    releases = json.loads(list_output)
                    # The filter is a regex, so confirm an exact name match.
                    release_exists = any(
                        release.get("name") == self.helm_release_name
                        for release in releases
                    )
                except json.JSONDecodeError:
                    logger.warning("Failed to parse helm list output")

            if release_exists:
                # Get detailed helm release status
                success, helm_output = self._run_helm_command([
                    "status", self.helm_release_name,
                    "--namespace", self.namespace,
                    "--output", "json"
                ], check=False)

                if success:
                    try:
                        helm_status = json.loads(helm_output)
                        status["helm_status"] = helm_status.get("info", {}).get("status")
                    except json.JSONDecodeError:
                        status["helm_status"] = "unknown"
                else:
                    status["helm_status"] = "error"
            else:
                # No release: report not_found and skip the k8s queries below.
                status["helm_status"] = "not_found"
                status["ready"] = False
                return status

            # Get aks-sreclaw deployment status
            deployment_list = self.apps_v1.list_namespaced_deployment(
                namespace=self.namespace,
                label_selector=AKS_SRECLAW_LABEL_SELECTOR
            )
            all_deployments = deployment_list.items

            for deployment in all_deployments:
                # Normalize None replica counters to 0 for easy comparison.
                dep_status = {
                    "name": deployment.metadata.name,
                    "replicas": deployment.status.replicas or 0,
                    "ready_replicas": deployment.status.ready_replicas or 0,
                    "updated_replicas": deployment.status.updated_replicas or 0,
                    "available_replicas": deployment.status.available_replicas or 0
                }
                status["deployments"].append(dep_status)

            # Get aks-sreclaw pod status
            pods = self.core_v1.list_namespaced_pod(
                namespace=self.namespace,
                label_selector=AKS_SRECLAW_LABEL_SELECTOR
            )

            for pod in pods.items:
                pod_status = {
                    "name": pod.metadata.name,
                    "phase": pod.status.phase,
                    "ready": False
                }

                # Check if pod is ready (Ready condition with status "True")
                if pod.status.conditions:
                    for condition in pod.status.conditions:
                        if condition.type == "Ready" and condition.status == "True":
                            pod_status["ready"] = True
                            break

                status["pods"].append(pod_status)

            # Determine overall readiness: every deployment must have all of
            # its (non-zero) replicas ready.
            if status["deployments"]:
                all_deployments_ready = all(
                    dep["ready_replicas"] == dep["replicas"] and dep["replicas"] > 0
                    for dep in status["deployments"]
                )
                status["ready"] = all_deployments_ready

            # Add LLM configuration information from the local config manager
            # (not read back from the cluster).
            if self.llm_config_manager.model_list:
                for provider_name, provider_config in self.llm_config_manager.model_list.items():
                    llm_info = {"provider": provider_name}
                    if "models" in provider_config:
                        llm_info["models"] = provider_config["models"]
                    if "api_base" in provider_config and provider_config["api_base"]:
                        llm_info["api_base"] = provider_config["api_base"]
                    status["llm_configs"].append(llm_info)

        except Exception as e:  # pylint: disable=broad-exception-caught
            # Never raise from a status query; surface the problem in the payload.
            logger.error("Failed to get agent status: %s", e)
            status["error"] = str(e)

        return status
+
+ def get_llm_config(self) -> Dict:
+ """
+ Get LLM configuration from Kubernetes cluster.
+
+ Returns:
+ Dictionary of model configurations if exists, empty dict otherwise
+
+ Raises:
+ ApiException: If API error occurs (except 404)
+ AzCLIError: If unexpected error occurs
+ """
+ try:
+ # Check if the LLM config secret exists
+ self.core_v1.read_namespaced_secret(
+ name=self.llm_secret_name,
+ namespace=self.namespace
+ )
+ logger.debug("LLM config secret '%s' found", self.llm_secret_name)
+ return self.llm_config_manager.model_list if self.llm_config_manager.model_list else {}
+ except ApiException as e:
+ if e.status == 404:
+ logger.debug("LLM config secret '%s' not found in namespace '%s'",
+ self.llm_secret_name, self.namespace)
+ return {}
+ logger.error("Failed to check LLM config existence (API error %s): %s",
+ e.status, e)
+ raise
+ except Exception as e: # pylint: disable=broad-exception-caught
+ logger.error("Unexpected error checking LLM config existence: %s", e)
+ raise AzCLIError(f"Failed to check LLM config existence: {e}")
+
+ def uninstall_sreclaw(self, delete_secret: bool = True, no_wait: bool = False) -> bool:
+ """
+ Uninstall SREClaw helm release and optionally delete LLM configuration secret.
+
+ Args:
+ delete_secret: Whether to delete the LLM configuration secret (default: True)
+ no_wait: Do not wait for the long-running operation to finish (default: False)
+
+ Returns:
+ True if uninstallation was successful
+ """
+ logger.info("Uninstalling SREClaw from namespace '%s'", self.namespace)
+
+ # Execute helm uninstall
+ helm_args = [
+ "uninstall", self.helm_release_name,
+ "--namespace", self.namespace,
+ "--timeout", "1m"
+ ]
+
+ # Add --wait flag only if no_wait is False
+ if not no_wait:
+ helm_args.append("--wait")
+
+ success, output = self._run_helm_command(helm_args)
+
+ # Check if release not found
+ if output == "RELEASE_NOT_FOUND":
+ logger.debug("Helm release '%s' not found", self.helm_release_name)
+ # Still try to delete the secret if it exists and requested
+ if delete_secret:
+ self.delete_llm_config_secret()
+ return True
+
+ if success:
+ logger.info("SREClaw uninstalled successfully")
+ # Delete the LLM configuration secret if requested
+ if delete_secret:
+ self.delete_llm_config_secret()
+
+ # Wait for pods to be removed only if not no_wait
+ if not no_wait:
+ logger.info("Waiting for pods to be removed...")
+ pods_removed = self._wait_for_pods_removed(timeout=60)
+ if not pods_removed:
+ logger.warning("Timeout waiting for all pods to be removed. Some pods may still be terminating.")
+
+ return True
+ raise AzCLIError(f"Failed to uninstall SREClaw: {output}")
+
    def exec_aks_sreclaw(self, command_flags: str = "") -> bool:
        """
        Execute commands on the SREClaw pod using PodExecManager.

        This method automatically discovers a running SREClaw pod and executes
        the specified command on it, attaching an interactive TTY session.

        Args:
            command_flags: Additional flags for the aks-sreclaw command

        Returns:
            True if execution was successful

        Raises:
            AzCLIError: If execution fails or no running pods are found
        """
        logger.info("Executing SREClaw command with flags: %s", command_flags)

        try:
            # Find available SREClaw pods internally
            success, result = self.get_sreclaw_pods()
            if not success:
                error_msg = f"Failed to find SREClaw pods: {result}\n"
                error_msg += (
                    "SREClaw may not be deployed. "
                    "Run 'az aks claw create' to initialize the deployment."
                )
                raise AzCLIError(error_msg)

            # On success, result is the list of pod names; it may still be empty.
            pod_names = result
            if not pod_names:
                error_msg = "No running SREClaw pods found.\n"
                error_msg += (
                    "SREClaw may not be deployed. "
                    "Run 'az aks claw create' to initialize the deployment."
                )
                raise AzCLIError(error_msg)

            # Use the first available pod or randomly select one?
            pod_name = pod_names[0]
            logger.debug("Using pod: %s", pod_name)

            # Prepare the command to execute in the pod.
            # NOTE(review): command_flags is interpolated unescaped into a
            # `bash -c` string inside the pod; flags containing shell
            # metacharacters will be interpreted by that shell — confirm this
            # is acceptable for the expected callers (CLI users who already
            # have exec access to the cluster).
            exec_command = [
                "/bin/bash", "-c",
                f"TERM=xterm PYTHONUNBUFFERED=0 PROMPT_TOOLKIT_NO_CPR=1 python aks-sreclaw.py ask {command_flags}"
            ]

            # Execute the command using the standalone exec function
            success = exec_command_in_pod(
                pod_name=pod_name,
                command=exec_command,
                namespace=self.namespace,
                kubeconfig_path=self.kubeconfig_path,
                interactive=True,
                tty=True
            )

            if not success:
                raise AzCLIError("Failed to execute SREClaw command")

            logger.info("AKS agent command executed successfully")
            return True

        except Exception as e:
            # Log for diagnostics, then let the original exception propagate.
            logger.error("Failed to execute AKS agent command: %s", e)
            raise
+
+ def create_llm_config_secret(self) -> None:
+ """Create or update the LLM configuration Kubernetes secret."""
+ secret_data = self.llm_config_manager.get_llm_model_secret_data()
+ secret_body = client.V1Secret(
+ api_version="v1",
+ kind="Secret",
+ metadata=client.V1ObjectMeta(name=self.llm_secret_name, namespace=self.namespace),
+ data=secret_data,
+ type="Opaque",
+ )
+ try:
+ self.core_v1.create_namespaced_secret(
+ namespace=self.namespace,
+ body=secret_body
+ )
+ logger.info("LLM configuration secret '%s' created successfully", self.llm_secret_name)
+
+ except ApiException as e:
+ if e.status == 409:
+ try:
+ self.core_v1.replace_namespaced_secret(
+ name=self.llm_secret_name,
+ namespace=self.namespace,
+ body=secret_body
+ )
+ logger.info("LLM configuration secret '%s' updated successfully", self.llm_secret_name)
+ except ApiException as update_error:
+ raise AzCLIError(f"Failed to update LLM configuration secret: {update_error}")
+ else:
+ raise AzCLIError(f"Failed to create LLM configuration secret: {e}")
+ except Exception as e:
+ raise AzCLIError(f"Unexpected error managing LLM configuration secret: {e}")
+
+ def create_gateway_token_secret(self) -> None:
+ """Create or update the openclaw-gateway-token secret with random token."""
+ import secrets
+
+ random_token = secrets.token_urlsafe(32)
+ secret_data = {
+ "OPENCLAW_GATEWAY_TOKEN": base64.b64encode(random_token.encode()).decode()
+ }
+
+ secret_body = client.V1Secret(
+ api_version="v1",
+ kind="Secret",
+ metadata=client.V1ObjectMeta(name=self.gateway_secret_name, namespace=self.namespace),
+ data=secret_data,
+ type="Opaque",
+ )
+ try:
+ self.core_v1.create_namespaced_secret(
+ namespace=self.namespace,
+ body=secret_body
+ )
+ logger.info("Gateway token secret '%s' created successfully", self.gateway_secret_name)
+
+ except ApiException as e:
+ if e.status == 409:
+ logger.info("Gateway token secret '%s' already exists, skipping creation", self.gateway_secret_name)
+ else:
+ raise AzCLIError(f"Failed to create gateway token secret: {e}")
+ except Exception as e:
+ raise AzCLIError(f"Unexpected error managing gateway token secret: {e}")
+
+ def delete_llm_config_secret(self) -> None:
+ """
+ Delete the LLM configuration Kubernetes secret.
+ Logs warning if secret doesn't exist, but doesn't raise an error.
+ """
+ try:
+ self.core_v1.delete_namespaced_secret(
+ name=self.llm_secret_name,
+ namespace=self.namespace
+ )
+ logger.info("LLM configuration secret '%s' deleted successfully", self.llm_secret_name)
+ except ApiException as e:
+ if e.status == 404:
+ logger.debug("LLM configuration secret '%s' not found, skipping deletion", self.llm_secret_name)
+ else:
+ logger.warning("Failed to delete LLM configuration secret: %s", e)
+ except Exception as e: # pylint: disable=broad-exception-caught
+ logger.warning("Unexpected error deleting LLM configuration secret: %s", e)
+
+ def _create_helm_values(self):
+ """Create Helm values for deploying the AKS agent with LLM configuration."""
+ helm_values = {
+ "image": {
+ "repository": "mainred/openclaw-gateway",
+ "tag": "latest"
+ },
+ "secrets": {
+ "existingSecret": self.gateway_secret_name
+ },
+ "serviceAccount": {
+ "create": False,
+ "name": self.sreclaw_service_account_name
+ },
+ "azureWorkloadIdentity": {
+ "enabled": True
+ },
+ "aks": {
+ "clusterName": self.cluster_name,
+ "resourceGroup": self.resource_group_name,
+ "subscriptionId": self.subscription_id
+ },
+ "nodeSelector": {"kubernetes.io/os": "linux"}
+ }
+
+ if self.llm_config_manager.model_list:
+ providers = []
+ for provider_name, provider_config in self.llm_config_manager.model_list.items():
+ provider_entry = {
+ "name": provider_name,
+ "apiKeySecretKey": f"{provider_name}-key",
+ "models": provider_config.get("models", [])
+ }
+
+ if "api_base" in provider_config:
+ provider_entry["apiBase"] = provider_config["api_base"]
+
+ providers.append(provider_entry)
+
+ helm_values["openclaw"] = {
+ "llm": {
+ "apiKeySecretName": self.llm_secret_name,
+ "providers": providers
+ }
+ }
+
+ return helm_values
+
+ def save_llm_config(self, provider: LLMProvider, params: dict) -> None:
+ """Save LLM configuration and create necessary secrets."""
+ self.llm_config_manager.save(provider, params)
+ self.create_llm_config_secret()
+ self.create_gateway_token_secret()
+
+ def get_gateway_token(self) -> str:
+ """Get the gateway token from Kubernetes secret.
+
+ Returns:
+ The gateway token string
+
+ Raises:
+ AzCLIError: If secret is not found or token is missing
+ """
+ try:
+ secret = self.core_v1.read_namespaced_secret(
+ name=self.gateway_secret_name,
+ namespace=self.namespace
+ )
+
+ if not secret.data or "OPENCLAW_GATEWAY_TOKEN" not in secret.data:
+ raise AzCLIError(f"Gateway token not found in secret '{self.gateway_secret_name}'")
+
+ token = base64.b64decode(secret.data["OPENCLAW_GATEWAY_TOKEN"]).decode("utf-8")
+ return token
+
+ except ApiException as e:
+ if e.status == 404:
+ raise AzCLIError(
+ f"Gateway token secret '{self.gateway_secret_name}' not found in namespace '{self.namespace}'. "
+ f"Please ensure SREClaw is properly deployed."
+ )
+ raise AzCLIError(f"Failed to retrieve gateway token: {e}")
+
+ def port_forward_to_service(self, local_port: int = 18789) -> str: # pylint: disable=unused-argument
+ """Port-forward to aks-sreclaw service.
+
+ Args:
+ local_port: Local port to bind to (default: 18789)
+
+ Returns:
+ The gateway token for authentication (returned before port-forwarding starts)
+
+ Raises:
+ AzCLIError: If service or pod is not found, or port-forwarding fails
+ """
+ # Get gateway token first before starting port-forward
+ gateway_token = self.get_gateway_token()
+
+ try:
+ service = self.core_v1.read_namespaced_service(name=self.chart_name, namespace=self.namespace)
+ except ApiException as e:
+ if e.status == 404:
+ raise AzCLIError(f"Service '{self.chart_name}' not found in namespace '{self.namespace}'")
+ raise
+
+ selector = service.spec.selector
+ if not selector:
+ raise AzCLIError(f"Service '{self.chart_name}' has no selector")
+
+ label_selector = ",".join([f"{k}={v}" for k, v in selector.items()])
+ pods = self.core_v1.list_namespaced_pod(namespace=self.namespace, label_selector=label_selector)
+
+ if not pods.items:
+ raise AzCLIError(f"No pods found for service '{self.chart_name}' in namespace '{self.namespace}'")
+
+ pod = None
+ for p in pods.items:
+ if p.status.phase == "Running":
+ pod = p
+ break
+
+ if not pod:
+ raise AzCLIError(f"No running pods found for service '{self.chart_name}'")
+
+ pod_name = pod.metadata.name
+ target_port = 18789
+
+ logger.info("Found running pod: %s", pod_name)
+
+ # Return token to caller before starting blocking port-forward
+ return gateway_token, pod_name, target_port
+
    def start_port_forward(self, pod_name: str, target_port: int, local_port: int = 18789) -> None:
        """Start port-forwarding (blocking operation).

        Listens on 127.0.0.1:local_port and, for every accepted connection,
        opens a fresh Kubernetes port-forward stream to the pod and pumps
        bytes both ways on a daemon thread. Runs until Ctrl+C.

        Args:
            pod_name: Name of the pod to forward to
            target_port: Target port on the pod
            local_port: Local port to bind to (default: 18789)

        Raises:
            AzCLIError: If the local port is already in use
            OSError: For other bind failures
        """
        import select
        import socket
        import threading

        from kubernetes.stream import portforward

        logger.info("Port-forwarding localhost:%d -> %s:%d", local_port, pod_name, target_port)

        # Start a local TCP server and forward each connection through the k8s portforward API
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            server.bind(("127.0.0.1", local_port))
        except OSError as e:
            # EADDRINUSE is errno 98 on Linux and 48 on macOS
            if e.errno == 98 or e.errno == 48:  # Address already in use (Linux/Mac)
                raise AzCLIError(
                    f"Port {local_port} is already in use. "
                    f"Please specify a different port using --local-port "
                )
            raise
        server.listen(5)
        server.settimeout(1.0)  # allow periodic Ctrl+C checking

        def _forward(local_conn, pf_socket):
            """Bidirectionally forward data between local_conn and pf_socket."""
            try:
                while True:
                    # 1s select timeout keeps the thread responsive to closed peers
                    readable, _, _ = select.select([local_conn, pf_socket], [], [], 1.0)
                    if local_conn in readable:
                        data = local_conn.recv(4096)
                        if not data:
                            break
                        pf_socket.sendall(data)
                    if pf_socket in readable:
                        data = pf_socket.recv(4096)
                        if not data:
                            break
                        local_conn.sendall(data)
            except Exception:  # pylint: disable=broad-exception-caught
                # Connection teardown races are expected here; just clean up.
                pass
            finally:
                local_conn.close()
                pf_socket.close()

        try:
            while True:
                try:
                    conn, addr = server.accept()
                except socket.timeout:
                    # Timeout is deliberate: gives KeyboardInterrupt a chance to fire.
                    continue
                logger.debug("Connection from %s", addr)
                # One port-forward stream per accepted connection.
                pf = portforward(
                    self.core_v1.connect_get_namespaced_pod_portforward,
                    pod_name,
                    self.namespace,
                    ports=str(target_port),
                )
                pf_sock = pf.socket(target_port)
                pf_sock.setblocking(True)
                # Daemon thread so outstanding forwards don't block process exit.
                t = threading.Thread(target=_forward, args=(conn, pf_sock), daemon=True)
                t.start()
        except KeyboardInterrupt:
            logger.info("Stopping port-forward...")
        finally:
            server.close()
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/k8s/helm_manager.py b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/k8s/helm_manager.py
new file mode 100644
index 00000000000..51be2f69464
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/k8s/helm_manager.py
@@ -0,0 +1,330 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+import os
+import platform
+import shutil
+import stat
+import subprocess
+import tarfile
+import tempfile
+import urllib.request
+import zipfile
+from pathlib import Path
+from typing import List, Optional, Tuple
+
+from azext_aks_sreclaw._consts import HELM_VERSION
+from knack.log import get_logger
+
+logger = get_logger(__name__)
+
+
class HelmManager:
    """
    Helm Binary Manager for OS-agnostic helm operations.

    This class provides functionality to:
    - Download and manage helm binaries locally
    - Support multiple operating systems and architectures
    - Execute helm commands using the managed binary
    - Share helm functionality across different chart deployments
    """

    def __init__(self, helm_version: str = HELM_VERSION, local_bin_dir: Optional[str] = None,
                 kubeconfig_path: Optional[str] = None):
        """
        Initialize the Helm Manager.

        Args:
            helm_version: Helm version to use (default: HELM_VERSION from _consts)
            local_bin_dir: Local directory for helm binary (default: ~/.aks-agent/bin)
            kubeconfig_path: Path to kubeconfig file (default: None - use default config)
        """
        self.helm_version = helm_version
        self.kubeconfig_path = kubeconfig_path

        # Set up local binary directory
        if local_bin_dir:
            self.local_bin_dir = Path(local_bin_dir)
        else:
            home_dir = Path.home()
            self.local_bin_dir = home_dir / ".aks-agent" / "bin"

        self.local_bin_dir.mkdir(parents=True, exist_ok=True)
        # Eagerly resolve (downloading if necessary) so later commands can
        # assume the binary exists.
        self.helm_binary_path = self._ensure_helm_binary()

    def _get_platform_info(self) -> Tuple[str, str]:
        """
        Get platform-specific information for helm binary download.

        Returns:
            Tuple of (os_name, arch) for helm binary selection

        Raises:
            ValueError: If the operating system is not supported
        """
        system = platform.system().lower()
        machine = platform.machine().lower()

        # Map system names
        if system == "darwin":
            os_name = "darwin"
        elif system == "windows":
            os_name = "windows"
        elif system == "linux":
            os_name = "linux"
        else:
            raise ValueError(f"Unsupported operating system: {system}")

        # Map architecture names to helm's release naming
        if machine in ("x86_64", "amd64"):
            arch = "amd64"
        elif machine in ("aarch64", "arm64"):
            arch = "arm64"
        elif machine.startswith("arm"):
            arch = "arm"
        elif machine in ("i386", "i686"):
            arch = "386"
        else:
            # Default to amd64 for unknown architectures
            logger.warning("Unknown architecture %s, defaulting to amd64", machine)
            arch = "amd64"

        return os_name, arch

    def _download_helm_binary(self) -> str:
        """
        Download helm binary for the current platform.

        Returns:
            Path to the downloaded helm binary

        Raises:
            ValueError: If the helm binary is not found in the archive
        """
        os_name, arch = self._get_platform_info()

        # Construct archive name per helm's release naming convention
        if os_name == "windows":
            filename = f"helm-v{self.helm_version}-{os_name}-{arch}.zip"
            binary_name = "helm.exe"
        else:
            filename = f"helm-v{self.helm_version}-{os_name}-{arch}.tar.gz"
            binary_name = "helm"

        # Official helm release artifacts are hosted at get.helm.sh under
        # the archive filename (e.g. helm-v3.x.y-linux-amd64.tar.gz).
        download_url = f"https://get.helm.sh/{filename}"
        logger.info("Downloading helm binary from: %s", download_url)

        # Download to temporary file
        with tempfile.NamedTemporaryFile(delete=False, suffix=f".{filename.split('.')[-1]}") as temp_file:
            try:
                with urllib.request.urlopen(download_url) as response:
                    shutil.copyfileobj(response, temp_file)
                temp_file_path = temp_file.name
            except Exception as e:  # pylint: disable=broad-exception-caught
                logger.error("Failed to download helm binary: %s", e)
                raise

        # Extract binary
        extracted_binary_path = None
        try:
            if os_name == "windows":
                # Handle ZIP file
                with zipfile.ZipFile(temp_file_path, 'r') as zip_file:
                    # Find the helm binary in the zip (it sits under a
                    # platform subdirectory, e.g. windows-amd64/helm.exe)
                    for member in zip_file.namelist():
                        if member.endswith(binary_name):
                            # Extract to local bin directory
                            zip_file.extract(member, self.local_bin_dir)
                            extracted_path = self.local_bin_dir / member
                            extracted_binary_path = self.local_bin_dir / binary_name
                            # Move to final location if needed
                            if extracted_path != extracted_binary_path:
                                shutil.move(str(extracted_path), str(extracted_binary_path))
                                # Clean up extracted directory if it exists
                                parent_dir = extracted_path.parent
                                if parent_dir != self.local_bin_dir and parent_dir.exists():
                                    shutil.rmtree(parent_dir)
                            break
            else:
                # Handle TAR.GZ file
                with tarfile.open(temp_file_path, 'r:gz') as tar_file:
                    # Find the helm binary in the tar (under e.g. linux-amd64/helm)
                    for member in tar_file.getnames():
                        if member.endswith(binary_name):
                            # Extract to local bin directory
                            tar_file.extract(member, self.local_bin_dir)
                            extracted_path = self.local_bin_dir / member
                            extracted_binary_path = self.local_bin_dir / binary_name
                            # Move to final location if needed
                            if extracted_path != extracted_binary_path:
                                shutil.move(str(extracted_path), str(extracted_binary_path))
                                # Clean up extracted directory if it exists
                                parent_dir = extracted_path.parent
                                if parent_dir != self.local_bin_dir and parent_dir.exists():
                                    shutil.rmtree(parent_dir)
                            break

            if not extracted_binary_path or not extracted_binary_path.exists():
                raise ValueError("Helm binary not found in downloaded archive")

            # Make binary executable on Unix systems
            if os_name != "windows":
                extracted_binary_path.chmod(extracted_binary_path.stat().st_mode | stat.S_IEXEC)

            logger.info("Helm binary downloaded and extracted to: %s", extracted_binary_path)
            return str(extracted_binary_path)

        except Exception as e:  # pylint: disable=broad-exception-caught
            logger.error("Failed to extract helm binary: %s", e)
            raise
        finally:
            # Clean up temporary file
            try:
                os.unlink(temp_file_path)
            except OSError:
                pass

    def _ensure_helm_binary(self) -> str:
        """
        Ensure helm binary is available locally.

        Reuses an existing working binary when possible; otherwise downloads
        a fresh one.

        Returns:
            Path to helm binary
        """
        os_name, _ = self._get_platform_info()
        binary_name = "helm.exe" if os_name == "windows" else "helm"
        binary_path = self.local_bin_dir / binary_name

        # Check if binary already exists and is executable
        if binary_path.exists():
            try:
                # Test if binary works by querying its version
                result = subprocess.run(
                    [str(binary_path), "version", "--client", "--short"],
                    capture_output=True,
                    text=True,
                    timeout=10
                )
                if result.returncode == 0:
                    logger.debug("Using existing helm binary: %s", binary_path)
                    return str(binary_path)
                logger.warning("Existing helm binary is not working, downloading new one")
            except Exception as e:  # pylint: disable=broad-exception-caught
                logger.warning("Failed to test existing helm binary: %s", e)

        # Download helm binary
        return self._download_helm_binary()

    def run_command(self, args: List[str], check: bool = True,  # pylint: disable=too-many-return-statements
                    timeout: int = 300) -> Tuple[bool, str]:
        """
        Execute a helm command using the locally managed helm binary.

        "release not found" failures are normalized to the sentinel string
        "RELEASE_NOT_FOUND" so callers can treat them specially.

        Args:
            args: List of helm command arguments
            check: Whether to raise exception on non-zero exit code
            timeout: Command timeout in seconds

        Returns:
            Tuple of (success, output)
        """
        cmd = [self.helm_binary_path]

        # Add --kubeconfig flag if specified
        if self.kubeconfig_path:
            cmd.extend(["--kubeconfig", self.kubeconfig_path])
            logger.debug("Using kubeconfig: %s", self.kubeconfig_path)

        cmd.extend(args)
        logger.debug("Executing helm command: %s", ' '.join(cmd))

        try:
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                check=check,
                timeout=timeout
            )

            if result.returncode == 0:
                logger.debug("Helm command succeeded: %s", result.stdout)
                return True, result.stdout
            # Only reachable when check=False: inspect stderr ourselves.
            # Check if this is a "release not found" error
            stderr_lower = result.stderr.lower()
            if "release: not found" in stderr_lower or "not found" in stderr_lower:
                logger.debug("Helm release not found: %s", result.stderr)
                return False, "RELEASE_NOT_FOUND"

            logger.error("Helm command failed: %s", result.stderr)
            return False, result.stderr

        except subprocess.TimeoutExpired:
            error_msg = f"Helm command timed out: {' '.join(cmd)}"
            logger.error("%s", error_msg)
            return False, error_msg
        except subprocess.CalledProcessError as e:
            # check=True path for non-zero exit codes.
            # Check if this is a "release not found" error
            stderr_lower = e.stderr.lower() if e.stderr else ""
            if "release: not found" in stderr_lower or "not found" in stderr_lower:
                logger.debug("Helm release not found: %s", e.stderr)
                return False, "RELEASE_NOT_FOUND"

            error_msg = f"Helm command failed with exit code {e.returncode}: {e.stderr}"
            logger.error("%s", error_msg)
            return False, error_msg
        except Exception as e:  # pylint: disable=broad-exception-caught
            error_msg = f"Unexpected error running helm command: {e}"
            logger.error("%s", error_msg)
            return False, error_msg

    def get_version(self) -> Optional[str]:
        """
        Get the version of the helm binary.

        Returns:
            Helm version string or None if failed
        """
        success, output = self.run_command(["version", "--client", "--short"], check=False)
        if success:
            return output.strip()
        return None

    def repo_add(self, name: str, url: str) -> bool:
        """
        Add a helm repository.

        Args:
            name: Repository name
            url: Repository URL

        Returns:
            True if successful
        """
        success, _ = self.run_command(["repo", "add", name, url])
        return success

    def repo_update(self) -> bool:
        """
        Update helm repositories.

        Returns:
            True if successful
        """
        success, _ = self.run_command(["repo", "update"])
        return success
+
+
def create_helm_manager(helm_version: str = HELM_VERSION,
                        local_bin_dir: Optional[str] = None,
                        kubeconfig_path: Optional[str] = None) -> HelmManager:
    """
    Factory function to create a HelmManager instance.

    Args:
        helm_version: Helm version to use (default: HELM_VERSION from _consts)
        local_bin_dir: Local directory for helm binary
        kubeconfig_path: Path to kubeconfig file forwarded to HelmManager
            (default: None - use the default kubernetes config, matching
            the previous behavior of this factory)

    Returns:
        HelmManager instance
    """
    return HelmManager(
        helm_version=helm_version,
        local_bin_dir=local_bin_dir,
        kubeconfig_path=kubeconfig_path
    )
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/k8s/pod_exec.py b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/k8s/pod_exec.py
new file mode 100644
index 00000000000..9493ca22488
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/k8s/pod_exec.py
@@ -0,0 +1,630 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+import json
+import os
+import platform
+import signal
+import struct
+import sys
+import threading
+import time
+from typing import List, Optional, Tuple
+
+from azext_aks_sreclaw._consts import (
+ AGENT_NAMESPACE,
+ HEARTBEAT_INTERVAL,
+ RESIZE_CHANNEL,
+)
+from knack.log import get_logger
+from kubernetes import client, config
+from kubernetes.stream import stream
+
+# Platform-specific imports
+IS_WINDOWS = platform.system() == 'Windows'
+
+if not IS_WINDOWS:
+ import fcntl
+ import select
+ import termios
+else:
+ # Windows doesn't have fcntl, select, or termios
+ fcntl = None
+ select = None
+ termios = None
+
+logger = get_logger(__name__)
+
+# WebSocket buffer size - matches Kubernetes client-go implementation
+# Reference: https://github.com/kubernetes/client-go/blob/master/transport/websocket/roundtripper.go#L67
+WEBSOCKET_BUFFER_SIZE = 32 * 1024 # 32 KiB
+
+
def _get_terminal_size() -> Tuple[int, int]:
    """
    Get current terminal size.

    Queries the OS first (shutil on Windows, TIOCGWINSZ ioctl on Unix) and
    falls back to the LINES/COLUMNS environment variables, then to 24x80.

    Returns:
        Tuple of (rows, cols)
    """
    try:
        if IS_WINDOWS:
            # Windows-specific terminal size detection
            import shutil
            size = shutil.get_terminal_size(fallback=(80, 24))
            return size.lines, size.columns

        # Unix/Linux terminal size detection
        size_struct = struct.pack('HHHH', 0, 0, 0, 0)
        result = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, size_struct)
        rows, cols, _, _ = struct.unpack('HHHH', result)
        return rows, cols
    except (OSError, IOError, ImportError, AttributeError):
        pass

    # Fallback to environment variables or defaults. Guard the int()
    # conversions: a non-numeric LINES/COLUMNS value would otherwise raise
    # an uncaught ValueError (the previous code did not handle this).
    try:
        return int(os.environ.get('LINES', 24)), int(os.environ.get('COLUMNS', 80))
    except ValueError:
        return 24, 80
+
+
def _send_terminal_size(exec_stream, rows, cols):
    """
    Send terminal size to the pod via the WebSocket resize channel.

    Args:
        exec_stream: The WebSocket stream object
        rows: Terminal height
        cols: Terminal width
    """
    # Kubernetes expects a JSON payload with "Width"/"Height" keys.
    payload = json.dumps({
        "Width": cols,
        "Height": rows
    })
    try:
        exec_stream.write_channel(RESIZE_CHANNEL, payload)
        logger.debug("Terminal resized to %dx%d", cols, rows)
    except Exception as e:  # pylint: disable=broad-exception-caught
        # Resize failures are non-fatal; the session continues at the old size.
        logger.debug("Failed to send terminal size: %s", e)
+
+
def _monitor_resize_events_windows(exec_stream, stop_event):
    """
    Poll for terminal size changes on Windows and forward them to the pod.

    Windows has no SIGWINCH, so resizes are detected by polling, mirroring
    kubectl's approach.
    Reference: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubectl/pkg/util/term/resizeevents_windows.go # pylint: disable=line-too-long

    Args:
        exec_stream: The WebSocket stream object
        stop_event: Threading event to stop monitoring
    """
    previous = _get_terminal_size()

    while not stop_event.is_set() and exec_stream.is_open():
        try:
            size = _get_terminal_size()
            if size != previous:
                rows, cols = size
                _send_terminal_size(exec_stream, rows, cols)
                previous = size
        except Exception as e:  # pylint: disable=broad-exception-caught
            logger.debug("Error monitoring terminal size: %s", e)
            break

        # Poll at the same 250ms interval kubectl uses, without busy-waiting.
        stop_event.wait(0.25)
+
+
def _set_terminal_raw_mode():
    """
    Set terminal to raw mode and return state for restoration.
    Implementation based on moby/term.

    Raw mode disables local echo and line buffering so keystrokes are
    forwarded to the pod unmodified; the caller must later restore the
    returned state via _restore_terminal_mode().

    Returns:
        Tuple of (fd, old_settings) for Unix/Linux or (stdin_handle, old_mode) for Windows
    """
    if IS_WINDOWS:
        # Windows raw mode implementation
        # Reference: https://github.com/moby/term/blob/main/termios_windows.go
        import ctypes
        from ctypes import wintypes

        kernel32 = ctypes.windll.kernel32

        # Get stdin handle
        STD_INPUT_HANDLE = -10
        stdin_handle = kernel32.GetStdHandle(STD_INPUT_HANDLE)

        # Save current console mode (returned to the caller for restoration)
        old_mode = wintypes.DWORD()
        kernel32.GetConsoleMode(stdin_handle, ctypes.byref(old_mode))

        # Console mode flags
        ENABLE_ECHO_INPUT = 0x0004
        ENABLE_LINE_INPUT = 0x0002
        ENABLE_MOUSE_INPUT = 0x0010
        ENABLE_WINDOW_INPUT = 0x0008
        ENABLE_PROCESSED_INPUT = 0x0001
        ENABLE_EXTENDED_FLAGS = 0x0080
        ENABLE_INSERT_MODE = 0x0020
        ENABLE_QUICK_EDIT_MODE = 0x0040
        ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200

        # Disable these modes (echo, line buffering, mouse/window events,
        # Ctrl-C processing) so input is passed through raw
        new_mode = old_mode.value
        new_mode &= ~ENABLE_ECHO_INPUT
        new_mode &= ~ENABLE_LINE_INPUT
        new_mode &= ~ENABLE_MOUSE_INPUT
        new_mode &= ~ENABLE_WINDOW_INPUT
        new_mode &= ~ENABLE_PROCESSED_INPUT

        # Enable these modes. NOTE(review): this flag set (including
        # QUICK_EDIT) follows moby/term's makeRaw; confirm against
        # https://github.com/moby/term if the behavior looks off.
        new_mode |= ENABLE_EXTENDED_FLAGS
        new_mode |= ENABLE_INSERT_MODE
        new_mode |= ENABLE_QUICK_EDIT_MODE
        new_mode |= ENABLE_VIRTUAL_TERMINAL_INPUT

        kernel32.SetConsoleMode(stdin_handle, new_mode)

        return stdin_handle, old_mode.value

    # Unix/Linux raw mode implementation
    # Reference: https://github.com/moby/term/blob/main/termios_unix.go
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    # Copy before mutating so old_settings stays pristine for restoration
    new_settings = list(old_settings)

    # Input modes - clear IGNBRK, BRKINT, PARMRK, ISTRIP, INLCR, IGNCR, ICRNL, IXON
    new_settings[0] &= ~(termios.IGNBRK | termios.BRKINT | termios.PARMRK |
                         termios.ISTRIP | termios.INLCR | termios.IGNCR |
                         termios.ICRNL | termios.IXON)

    # Output modes - clear OPOST (no output post-processing)
    new_settings[1] &= ~termios.OPOST

    # Local modes - clear ECHO, ECHONL, ICANON, ISIG, IEXTEN
    # (no local echo, no canonical line editing, no signal generation)
    new_settings[3] &= ~(termios.ECHO | termios.ECHONL | termios.ICANON |
                         termios.ISIG | termios.IEXTEN)

    # Control modes - clear CSIZE, PARENB; set CS8 (8-bit characters, no parity)
    new_settings[2] &= ~(termios.CSIZE | termios.PARENB)
    new_settings[2] |= termios.CS8

    # Control characters - set VMIN = 1, VTIME = 0 (blocking single-byte reads)
    new_settings[6][termios.VMIN] = 1
    new_settings[6][termios.VTIME] = 0

    # TCSADRAIN: apply after pending output has been transmitted
    termios.tcsetattr(fd, termios.TCSADRAIN, new_settings)

    return fd, old_settings
+
+
def _restore_terminal_mode(fd_or_handle, old_settings, windows_console_state=None):
    """
    Restore terminal to original mode.

    Counterpart of _set_terminal_raw_mode(); must be called with the state
    that function returned. Failures are logged at debug level and swallowed
    because restoration runs during cleanup.

    Args:
        fd_or_handle: File descriptor (Unix) or handle (Windows)
        old_settings: Original terminal settings to restore
        windows_console_state: Windows console state tuple (output_cp, input_cp, stdout_mode, stdout_handle)
    """
    try:
        if IS_WINDOWS:
            import ctypes
            kernel32 = ctypes.windll.kernel32

            # Restore terminal raw mode
            kernel32.SetConsoleMode(fd_or_handle, old_settings)

            # Restore Windows console settings (code pages and VT100 mode)
            if windows_console_state is not None:
                output_cp, input_cp, stdout_mode, stdout_handle = windows_console_state
                kernel32.SetConsoleOutputCP(output_cp)
                kernel32.SetConsoleCP(input_cp)
                kernel32.SetConsoleMode(stdout_handle, stdout_mode)
        else:
            # TCSADRAIN: apply after pending output has been transmitted
            termios.tcsetattr(fd_or_handle, termios.TCSADRAIN, old_settings)
    except (NameError, OSError, IOError) as e:
        # NameError covers termios being None on unexpected platforms
        logger.debug("Failed to restore terminal mode: %s", e)
+
+
+def _is_blocking_error(error):
+ """
+ Check if an error is a blocking I/O error (resource temporarily unavailable).
+
+ Args:
+ error: The exception to check
+
+ Returns:
+ True if it's a blocking error (EAGAIN/EWOULDBLOCK)
+ """
+ import errno
+ err_code = getattr(error, 'errno', None) or getattr(error, 'winerror', None)
+ return err_code in (errno.EAGAIN, errno.EWOULDBLOCK) if err_code else False
+
+
+def _is_connection_reset_error(error):
+ """
+ Check if an error is a connection reset error.
+ Handles both Unix (ECONNRESET, EPIPE) and Windows (WSAECONNRESET) errors.
+
+ Args:
+ error: The exception to check
+
+ Returns:
+ True if connection was reset by remote
+ """
+ import errno
+ err_code = getattr(error, 'errno', None) or getattr(error, 'winerror', None)
+ # Unix: ECONNRESET, EPIPE
+ # Windows: WinError 10054 (WSAECONNRESET)
+ return err_code in (errno.ECONNRESET, errno.EPIPE, 10054) if err_code else False
+
+
def _heartbeat_worker(exec_stream, stop_event):
    """
    Keep the exec WebSocket connection alive with periodic pings.

    Runs until the stop event is set or the stream closes; any send
    failure terminates the worker.

    Args:
        exec_stream: The WebSocket stream object
        stop_event: Threading event to stop the heartbeat
    """
    last_sent = time.time()

    while not stop_event.is_set() and exec_stream.is_open():
        now = time.time()

        if now - last_sent >= HEARTBEAT_INTERVAL:
            try:
                if hasattr(exec_stream, 'ping'):
                    # Preferred: a real WebSocket ping frame
                    exec_stream.ping()
                else:
                    # Fallback: empty stdin write acts as a keep-alive
                    exec_stream.write_stdin('')

                last_sent = now
                logger.debug("Heartbeat sent to maintain WebSocket connection")
            except Exception as e:  # pylint: disable=broad-exception-caught
                logger.debug("Heartbeat failed: %s", e)
                break

        # Wake periodically instead of busy-waiting
        stop_event.wait(min(1.0, HEARTBEAT_INTERVAL / 5))
+
+
def exec_command_in_pod(pod_name: str, command: List[str],  # pylint: disable=too-many-branches,too-many-locals
                        namespace: str = AGENT_NAMESPACE,
                        kubeconfig_path: Optional[str] = None,
                        interactive: bool = True,
                        tty: bool = True) -> bool:
    """
    Execute a command in a specific pod with interactive session.

    In interactive mode the local terminal is put into raw mode, resize
    events and a heartbeat are forwarded over the exec WebSocket, and
    stdin/stdout/stderr are pumped until the remote side closes. All
    terminal, signal, and connection state is restored in cleanup().

    Args:
        pod_name: Name of the pod to exec into
        command: Command to execute as a list of strings
        namespace: Namespace of the pod (default: AGENT_NAMESPACE)
        kubeconfig_path: Path to kubeconfig file (default: None - use default config)
        interactive: Whether to enable interactive mode
        tty: Whether to allocate a TTY

    Returns:
        True if execution was successful (including user interrupt), False on error
    """
    logger.info("Executing command in pod '%s' in namespace '%s'", pod_name, namespace)
    logger.debug("Command: %s", ' '.join(command))

    # Variables for resource cleanup - initialized to None for safe cleanup in exception handlers
    resp = None  # WebSocket connection to pod exec API
    terminal_state = None  # Original terminal settings (termios structure on Unix, console mode on Windows)
    original_sigwinch = None  # Original SIGWINCH signal handler (Unix/Linux only)

    # File descriptor and flags for restoring blocking mode
    stdin_fd = None  # stdin file descriptor number
    original_stdin_flags = None  # Original stdin flags (before setting O_NONBLOCK)

    cleanup_done = False  # Flag to prevent duplicate cleanup execution
    windows_console_state = None  # Saved Windows console settings (code pages and VT100 mode)
    resize_stop_event = None  # Event to signal resize monitoring thread to stop (Windows only)
    resize_thread = None  # Background thread for monitoring terminal resize events (Windows only)

    def cleanup():
        """Cleanup function to ensure proper resource cleanup."""
        nonlocal cleanup_done

        # Prevent duplicate cleanup
        if cleanup_done:
            return
        cleanup_done = True

        # Restore signal handler (Unix/Linux). Windows does not use signal handlers for resize.
        # NOTE(review): signal.SIG_DFL is falsy (enum value 0), so if the original
        # SIGWINCH handler was the default it is not restored here. Likely harmless
        # because the handler is only installed for the session's lifetime - confirm.
        if original_sigwinch and not IS_WINDOWS:
            try:
                signal.signal(signal.SIGWINCH, original_sigwinch)
            except (ValueError, OSError):
                pass

        # Restore terminal mode and Windows console settings
        if terminal_state is not None:
            _restore_terminal_mode(terminal_state[0], terminal_state[1], windows_console_state)

        # Restore stdin to blocking mode
        if not IS_WINDOWS:
            if stdin_fd is not None and original_stdin_flags is not None:
                try:
                    fcntl.fcntl(stdin_fd, fcntl.F_SETFL, original_stdin_flags)
                except (NameError, OSError, IOError):
                    pass

        # Close WebSocket connection
        if resp is not None:
            try:
                resp.close()
            except Exception as e:  # pylint: disable=broad-exception-caught
                logger.debug("Error closing WebSocket connection: %s", e)

    # Register cleanup for SIGTERM
    def signal_handler(signum, _frame):
        logger.info("Received signal %d, cleaning up...", signum)
        # Raise SystemExit to trigger finally block and normal cleanup
        raise SystemExit(0)

    original_sigterm = None
    if hasattr(signal, 'SIGTERM'):
        original_sigterm = signal.signal(signal.SIGTERM, signal_handler)
    try:
        # Initialize Kubernetes client
        if kubeconfig_path:
            config.load_kube_config(config_file=kubeconfig_path)
        else:
            config.load_kube_config()

        core_v1 = client.CoreV1Api()

        # Create the exec session
        # Reference: https://github.com/kubernetes/client-go/blob/master/transport/websocket/roundtripper.go#L113
        # client-go uses DataBufferSize + 1024 for both read and write buffers
        # The +1024 accounts for the protocol byte indicating which channel the data is for
        websocket_buffer_size = WEBSOCKET_BUFFER_SIZE + 1024

        resp = stream(
            core_v1.connect_get_namespaced_pod_exec,
            pod_name,
            namespace,
            command=command,
            stdin=interactive,
            stdout=True,
            stderr=True,
            tty=tty,
            _preload_content=False,
            _request_timeout=None
        )

        # Set WebSocket buffer sizes directly on the underlying socket
        # The kubernetes-client library doesn't support sockopt parameter natively,
        # so we configure the socket after creation
        try:
            import socket
            resp.sock.sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, websocket_buffer_size)
            resp.sock.sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, websocket_buffer_size)
        except (AttributeError, OSError):
            # If setting socket options fails, continue anyway
            # The connection will still work with default buffer sizes
            pass

        if not interactive:
            # Non-interactive mode - just capture output until the stream closes
            while resp.is_open():
                resp.update(timeout=1)
                if resp.peek_stdout():
                    print(resp.read_stdout(), end='')
                if resp.peek_stderr():
                    print(resp.read_stderr(), end='', file=sys.stderr)
            resp.close()
            return True

        # Interactive mode setup
        heartbeat_stop_event = None
        heartbeat_thread = None

        try:
            # Configure Windows console for UTF-8 output
            if IS_WINDOWS:
                import ctypes
                kernel32 = ctypes.windll.kernel32

                # Save original console settings (restored by cleanup())
                original_output_cp = kernel32.GetConsoleOutputCP()
                original_input_cp = kernel32.GetConsoleCP()
                STD_OUTPUT_HANDLE = -11
                stdout_handle = kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
                original_mode = ctypes.c_uint32()
                kernel32.GetConsoleMode(stdout_handle, ctypes.byref(original_mode))
                windows_console_state = (original_output_cp, original_input_cp,
                                         original_mode.value, stdout_handle)

                # Set console output code page to UTF-8 (65001)
                kernel32.SetConsoleOutputCP(65001)
                # Set console input code page to UTF-8
                kernel32.SetConsoleCP(65001)
                # Enable VT100 processing for ANSI escape sequences
                ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
                kernel32.SetConsoleMode(stdout_handle, original_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)

            # Put terminal in raw mode to disable local echo
            if tty:
                terminal_state = _set_terminal_raw_mode()

            # Set up terminal resize monitoring
            # Reference: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubectl/pkg/util/term/
            if IS_WINDOWS:
                # Windows: Poll for size changes in background thread
                resize_stop_event = threading.Event()
                resize_thread = threading.Thread(
                    target=_monitor_resize_events_windows,
                    args=(resp, resize_stop_event),
                    daemon=True
                )
                resize_thread.start()
            elif hasattr(signal, 'SIGWINCH'):
                # Unix/Linux: Use SIGWINCH signal handler (must be in main thread)
                def sigwinch_handler(_signum, _frame):
                    try:
                        rows, cols = _get_terminal_size()
                        _send_terminal_size(resp, rows, cols)
                    except Exception as e:  # pylint: disable=broad-exception-caught
                        logger.debug("Error handling terminal resize: %s", e)

                original_sigwinch = signal.signal(signal.SIGWINCH, sigwinch_handler)

            # Set up heartbeat mechanism
            heartbeat_stop_event = threading.Event()
            heartbeat_thread = threading.Thread(
                target=_heartbeat_worker,
                args=(resp, heartbeat_stop_event),
                daemon=True
            )
            heartbeat_thread.start()

            # Configure file descriptor blocking modes (Unix/Linux only)
            if not IS_WINDOWS:
                # stdin - set non-blocking for non-blocking reads
                stdin_fd = sys.stdin.fileno()
                original_stdin_flags = fcntl.fcntl(stdin_fd, fcntl.F_GETFL)
                fcntl.fcntl(stdin_fd, fcntl.F_SETFL, original_stdin_flags | os.O_NONBLOCK)

                # stdout - explicitly set to blocking mode for reliable writes
                stdout_fd = sys.stdout.fileno()
                stdout_flags = fcntl.fcntl(stdout_fd, fcntl.F_GETFL)
                fcntl.fcntl(stdout_fd, fcntl.F_SETFL, stdout_flags & ~os.O_NONBLOCK)

                # stderr - explicitly set to blocking mode for reliable writes
                stderr_fd = sys.stderr.fileno()
                stderr_flags = fcntl.fcntl(stderr_fd, fcntl.F_GETFL)
                fcntl.fcntl(stderr_fd, fcntl.F_SETFL, stderr_flags & ~os.O_NONBLOCK)

            # Send initial terminal size if TTY is enabled
            if tty:
                try:
                    rows, cols = _get_terminal_size()
                    resize_message = json.dumps({
                        "Width": cols,
                        "Height": rows
                    })
                    resp.write_channel(RESIZE_CHANNEL, resize_message)
                except Exception as e:  # pylint: disable=broad-exception-caught
                    logger.debug("Failed to send initial terminal size: %s", e)

            # Main interaction loop
            import errno
            while resp.is_open():
                try:
                    resp.update(timeout=0.1)
                except (OSError, IOError) as e:
                    if _is_connection_reset_error(e):
                        logger.debug("Connection closed by remote: %s", e)
                        break
                    raise

                # Handle stdout
                if resp.peek_stdout():
                    stdout_data = resp.read_stdout()
                    data = stdout_data.encode()

                    # Write in chunks to avoid blocking on full pipe buffer
                    # This prevents "BlockingIOError: [Errno 35] write could not complete without blocking"
                    # which is easily reproducible on macOS (default pipe buffer: 64 KiB)
                    for start in range(0, len(data), WEBSOCKET_BUFFER_SIZE):
                        chunk = data[start: start + WEBSOCKET_BUFFER_SIZE]

                        while True:
                            try:
                                os.write(sys.stdout.fileno(), chunk)
                                break  # success - move on to the next chunk
                            except BlockingIOError as exc:
                                if exc.errno not in (errno.EAGAIN, errno.EWOULDBLOCK):
                                    raise  # unexpected error
                                time.sleep(0)  # yield to let the system drain the pipe buffer

                # Handle stderr
                if resp.peek_stderr():
                    stderr_data = resp.read_stderr()
                    sys.stderr.write(stderr_data)
                    sys.stderr.flush()

                # Handle stdin
                try:
                    if IS_WINDOWS:
                        # Windows: Use msvcrt for non-blocking input
                        import msvcrt
                        if msvcrt.kbhit():
                            data = msvcrt.getwch()
                            if data:
                                try:
                                    resp.write_stdin(data)
                                except OSError as e:
                                    if _is_blocking_error(e):
                                        logger.debug("stdin write blocked: %s", e)
                                    elif _is_connection_reset_error(e):
                                        logger.debug("Connection closed while writing stdin: %s", e)
                                        break
                                    else:
                                        raise
                    else:
                        # Unix/Linux/macOS: Use select for non-blocking input
                        if select.select([sys.stdin], [], [], 0)[0]:
                            # Read in chunks matching WebSocket buffer size
                            # Reference: https://github.com/kubernetes/client-go/blob/master/transport/websocket/roundtripper.go#L113 # pylint: disable=line-too-long
                            # Even with O_NONBLOCK set, stdin.read() without args can block
                            try:
                                data = os.read(sys.stdin.fileno(), WEBSOCKET_BUFFER_SIZE).decode(
                                    'utf-8', errors='replace')
                            except BlockingIOError:
                                data = None
                            if data:
                                try:
                                    resp.write_stdin(data)
                                except OSError as e:
                                    if _is_blocking_error(e):
                                        logger.debug("stdin write blocked: %s", e)
                                    elif _is_connection_reset_error(e):
                                        logger.debug("Connection closed while writing stdin: %s", e)
                                        break
                                    else:
                                        raise
                except (OSError, IOError, ImportError):
                    # No input available or import failed
                    pass

            logger.info("Pod exec session completed successfully")
            return True

        except KeyboardInterrupt:
            # Treat Ctrl-C as a normal end of session, not a failure
            logger.info("Pod exec session interrupted by user")
            return True
        finally:
            # Stop resize monitoring
            if resize_stop_event:
                resize_stop_event.set()
            if resize_thread and resize_thread.is_alive():
                resize_thread.join(timeout=2.0)

            # Stop heartbeat
            if heartbeat_stop_event:
                heartbeat_stop_event.set()
            if heartbeat_thread and heartbeat_thread.is_alive():
                heartbeat_thread.join(timeout=2.0)

    except Exception as e:  # pylint: disable=broad-exception-caught
        logger.error("Failed to execute command in pod '%s': %s", pod_name, e)
        return False
    finally:
        # Always cleanup resources
        cleanup()

        # Restore original SIGTERM handler
        if original_sigterm and hasattr(signal, 'SIGTERM'):
            try:
                signal.signal(signal.SIGTERM, original_sigterm)
            except (ValueError, OSError):
                pass
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_config_manager.py b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_config_manager.py
new file mode 100644
index 00000000000..fc49a09aadc
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_config_manager.py
@@ -0,0 +1,48 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+from typing import Dict
+
+from azext_aks_sreclaw.sreclaw.llm_providers import LLMProvider
+from knack.log import get_logger
+
+logger = get_logger(__name__)
+
+
class LLMConfigManager:
    """Manages loading and saving LLM configuration from/to a YAML file."""

    def __init__(self, model_list: Dict = None):
        # Keyed by provider name; each value is that provider's config dict.
        self.model_list = {} if model_list is None else model_list

    def save(self, provider: LLMProvider, params: dict):
        """Record one provider's configuration in the in-memory model list."""
        raw_models = params.get("models", "")
        # Split the comma-separated field and drop blank entries.
        models = [entry.strip() for entry in raw_models.split(",") if entry.strip()]

        entry = {
            "provider": provider.name,
            "models": models
        }

        # api_base and api_key are optional; copy them only when supplied.
        for optional_key in ("api_base", "api_key"):
            if optional_key in params:
                entry[optional_key] = params[optional_key]

        self.model_list[provider.name] = entry

    def get_llm_model_secret_data(self) -> Dict[str, str]:
        """
        Get Kubernetes secret data for all LLM providers in the configuration.

        Only providers with an api_key are included; each key is stored under
        '<provider>-key' as a base64-encoded string.
        """
        import base64
        return {
            f"{name}-key": base64.b64encode(cfg["api_key"].encode("utf-8")).decode("utf-8")
            for name, cfg in self.model_list.items()
            if "api_key" in cfg
        }
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_providers/__init__.py b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_providers/__init__.py
new file mode 100644
index 00000000000..a914fe411ad
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_providers/__init__.py
@@ -0,0 +1,80 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+from typing import List, Tuple
+
+from azext_aks_sreclaw.sreclaw.console import ERROR_COLOR, HELP_COLOR
+from rich.console import Console
+
+from .anthropic_provider import AnthropicProvider
+from .azure_provider import AzureProvider
+from .base import LLMProvider
+from .openai_provider import OpenAIProvider
+
console = Console()

# Registered provider classes (not instances) - the order here defines the
# numbered menu order shown to the user.
_PROVIDER_CLASSES: List[type] = [
    AzureProvider,
    OpenAIProvider,
    AnthropicProvider,
    # Add new providers here
]

# Map lowercase provider name -> provider class; on duplicate names the
# first registration wins.
PROVIDER_REGISTRY = {}
for cls in _PROVIDER_CLASSES:
    key = cls().name.lower()
    if key not in PROVIDER_REGISTRY:
        PROVIDER_REGISTRY[key] = cls
+
+
def _available_providers() -> List[type]:
    """Return the registered provider classes (not names), in menu order."""
    return _PROVIDER_CLASSES
+
+
def _provider_choices_numbered() -> List[Tuple[int, str]]:
    """Return numbered choices using human-readable names:
    [(1, "Azure OpenAI"), (2, "OpenAI"), ...].
    """
    return [(i + 1, provider().readable_name) for i, provider in enumerate(_available_providers())]
+
+
def _get_provider_by_index(idx: int) -> LLMProvider:
    """
    Return provider instance by numeric index (1-based).

    Raises:
        ValueError: If the index is out of range.
    """
    if 1 <= idx <= len(_PROVIDER_CLASSES):
        # Instantiate once and reuse for both the confirmation message and
        # the return value (previously the class was instantiated twice).
        provider = _PROVIDER_CLASSES[idx - 1]()
        console.print("You selected provider:", provider.readable_name, style=f"bold {HELP_COLOR}")
        return provider
    raise ValueError(f"Invalid provider index: {idx}")
+
+
def prompt_provider_choice() -> LLMProvider:
    """
    Show a numbered menu and return the chosen provider instance.

    Re-prompts on invalid input; the user may type '/exit' to abort.
    """
    choices = _provider_choices_numbered()
    if not choices:
        raise ValueError("No providers are registered.")

    while True:
        # Redisplay the menu on every attempt.
        for number, display_name in choices:
            console.print(f" {number}. {display_name}", style=f"bold {HELP_COLOR}")
        selection = console.input(
            f"[bold {HELP_COLOR}]Please choose the LLM provider (1-{len(choices)}): [/bold {HELP_COLOR}]").strip()

        if selection == "/exit":
            raise SystemExit(0)
        try:
            # int() and the range check both raise ValueError on bad input.
            return _get_provider_by_index(int(selection))
        except ValueError as e:
            console.print(
                f"{e}. Please enter a valid number, or type '/exit' to exit.",
                style=f"{ERROR_COLOR}")
+
+
# Public API of the llm_providers package; leading-underscore helpers are internal.
__all__ = [
    "PROVIDER_REGISTRY",
    "prompt_provider_choice",
]
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_providers/anthropic_provider.py b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_providers/anthropic_provider.py
new file mode 100644
index 00000000000..14e29d18606
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_providers/anthropic_provider.py
@@ -0,0 +1,67 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+
+from typing import Tuple
+
+from openai import OpenAI
+
+from .base import LLMProvider, non_empty
+
+
class AnthropicProvider(LLMProvider):
    """LLM provider for Anthropic, accessed through its OpenAI-compatible endpoint."""

    @property
    def readable_name(self) -> str:
        return "Anthropic"

    @property
    def name(self) -> str:
        return "anthropic"

    @property
    def parameter_schema(self):
        return {
            "api_key": {
                "secret": True,
                "default": None,
                "hint": None,
                "validator": non_empty
            },
            "models": {
                "secret": False,
                "default": "claude-sonnet-4",
                "hint": "comma-separated model names, e.g., claude-sonnet-4,claude-opus-4",
                "validator": non_empty
            },
        }

    def validate_connection(self, params: dict) -> Tuple[str, str]:
        """
        Validate credentials by sending a minimal request for each model.

        Returns:
            Tuple of (error message or None, next action), where next action
            is one of "retry_input", "connection_error", or "save".
        """
        api_key = params.get("api_key")
        models_str = params.get("models")
        if not all([api_key, models_str]):
            return "Missing required Anthropic parameters.", "retry_input"

        # Drop empty entries (e.g. from a trailing comma) so we never probe
        # an empty model name; matches LLMConfigManager.save's parsing.
        models = [m.strip() for m in models_str.split(",") if m.strip()]
        client = OpenAI(
            api_key=api_key,
            base_url="https://api.anthropic.com/v1"
        )

        for model_name in models:
            try:
                client.chat.completions.create(
                    model=model_name,
                    messages=[{"role": "user", "content": "ping"}],
                    max_tokens=16,
                    timeout=10
                )
            except Exception as e:  # pylint: disable=broad-exception-caught
                # Auth/validation errors are user-fixable; anything else is connectivity.
                error_str = str(e).lower()
                if any(x in error_str for x in ["api key", "authentication", "unauthorized",
                                                "invalid", "bad request"]):
                    return f"Model '{model_name}' validation failed: {e}", "retry_input"
                return f"Model '{model_name}' connection error: {e}", "connection_error"

        return None, "save"
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_providers/azure_provider.py b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_providers/azure_provider.py
new file mode 100644
index 00000000000..839654ee9da
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_providers/azure_provider.py
@@ -0,0 +1,82 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+
+from typing import Tuple
+
+from openai import AzureOpenAI
+
+from .base import LLMProvider, is_valid_url, non_empty
+
+
def is_valid_api_base(v: str) -> bool:
    """Return True when *v* is a well-formed URL that uses the https scheme."""
    return v.startswith("https://") and is_valid_url(v)
+
+
class AzureProvider(LLMProvider):
    """LLM provider for Azure OpenAI deployments."""

    @property
    def readable_name(self) -> str:
        return "Azure OpenAI"

    @property
    def name(self) -> str:
        return "azure-openai"

    @property
    def parameter_schema(self):
        return {
            "models": {
                "secret": False,
                "default": None,
                "hint": "comma-separated deployment names, e.g., gpt-5.4,gpt-5.1",
                "validator": non_empty,
                "alias": "models"
            },
            "api_key": {
                "secret": True,
                "default": None,
                "hint": None,
                "validator": non_empty
            },
            "api_base": {
                "secret": False,
                "default": None,
                "hint": "e.g., https://YOUR-RESOURCE-NAME.openai.azure.com/openai/v1/",
                "validator": is_valid_api_base
            }
        }

    def validate_connection(self, params: dict) -> Tuple[str, str]:
        """
        Validate credentials by sending a minimal request per deployment.

        Returns:
            Tuple of (error message or None, next action), where next action
            is one of "retry_input", "connection_error", or "save".
        """
        api_key = params.get("api_key")
        api_base = params.get("api_base")
        models_str = params.get("models")

        if not all([api_key, api_base, models_str]):
            return "Missing required Azure OpenAI parameters.", "retry_input"

        # Drop empty entries (e.g. from a trailing comma) so we never probe
        # an empty deployment name; matches LLMConfigManager.save's parsing.
        models = [m.strip() for m in models_str.split(",") if m.strip()]
        client = AzureOpenAI(
            api_key=api_key,
            azure_endpoint=api_base
        )

        for model_name in models:
            try:
                client.responses.create(
                    model=model_name,
                    instructions="You are a helpful assistant.",
                    input="ping",
                    timeout=10
                )
            except Exception as e:  # pylint: disable=broad-exception-caught
                # Auth/validation/deployment errors are user-fixable; anything
                # else is treated as a connectivity problem.
                error_str = str(e).lower()
                if any(x in error_str for x in ["api key", "authentication", "unauthorized",
                                                "invalid", "bad request", "deployment"]):
                    return f"Model '{model_name}' validation failed: {e}", "retry_input"
                return f"Model '{model_name}' connection error: {e}", "connection_error"

        return None, "save"
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_providers/base.py b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_providers/base.py
new file mode 100644
index 00000000000..7f7b936b3f5
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_providers/base.py
@@ -0,0 +1,149 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+
+import base64 # pylint: disable=unused-import
+from abc import ABC, abstractmethod
+from typing import Any, Callable, Dict, Tuple
+from urllib.parse import urlparse
+
+from azext_aks_sreclaw.sreclaw.console import (
+ DEFAULT_VALUE_COLOR,
+ ERROR_COLOR,
+ HELP_COLOR,
+ HINT_COLOR,
+ get_console,
+)
+
+
def non_empty(v: str) -> bool:
    """Return True when *v* contains at least one non-whitespace character."""
    if not v:
        return False
    return bool(v.strip())
+
+
def is_valid_url(v: str) -> bool:
    """Return True when *v* parses as a URL with both a scheme and a host."""
    try:
        pieces = urlparse(v)
    except ValueError:
        # urlparse raises ValueError for a few malformed inputs
        # (e.g. unbalanced IPv6 brackets); treat those as invalid.
        return False
    return bool(pieces.scheme) and bool(pieces.netloc)
+
+
class LLMProvider(ABC):
    """Abstract base for interactive LLM provider configuration.

    Concrete providers declare a parameter schema; this base class drives the
    console prompting, per-parameter validation, and delegates connection
    verification to the subclass.
    """

    @property
    @abstractmethod
    def readable_name(self) -> str:
        """Return the human-readable provider name, e.g. "Azure OpenAI"."""
        return "Base Provider"

    @property
    def name(self) -> str:
        """Return the machine identifier for this provider.

        This name is used as the OpenClaw LLM provider identifier and must
        match the provider name expected by the OpenClaw configuration.
        Examples: "azure-openai", "openai", "anthropic"
        """
        return ""

    @property
    @abstractmethod
    def parameter_schema(self) -> Dict[str, Dict[str, Any]]:
        """Return a schema mapping parameter name -> metadata:

        {
            "PARAM_NAME": {
                "prompt": "Prompt to show user",
                "secret": True/False,
                "default": "default value or None",
                "hint": "Additional hint to show user",
                "validator": Callable[[str], bool] # function to validate input,
                "alias": "alias" # optional alternative names for the param
            }
        }

        All keys are optional for each parameter; ``prompt_params`` supplies
        sensible fallbacks for missing metadata.
        """
        raise NotImplementedError()

    def prompt_params(self) -> Dict[str, str]:
        """Interactively collect values for every parameter in the schema.

        Secret values are echoed masked; invalid input re-prompts until the
        validator passes, and typing ``/exit`` aborts the whole CLI.

        :return: mapping of schema parameter name -> entered value.
        """
        schema = self.parameter_schema
        params = {}
        for param, meta in schema.items():
            # The alias (when present) is only used for display; the collected
            # value is still stored under the schema key.
            prompt_name = param
            if "alias" in meta:
                prompt_name = meta["alias"]
            prompt = meta.get("prompt", f"[bold {HELP_COLOR}]Enter value for {prompt_name}: [/]")
            default = meta.get("default")
            hint = meta.get("hint")
            secret = meta.get("secret", False)
            # Fall back to an accept-anything validator when none is declared.
            validator: Callable[[str], bool] = meta.get(
                "validator", lambda x: True)

            if default:
                prompt += f" [italic {DEFAULT_VALUE_COLOR}](Default: {default})[/] "
            if hint:
                prompt += f" [italic {HINT_COLOR}](Hint: {hint})[/] "

            console = get_console()
            while True:
                if secret:
                    # For password input, we'll handle the display differently:
                    # read without echo, then re-print the prompt with a masked
                    # representation of what was typed.
                    value = console.input(prompt, password=secret)
                    # Calculate the masked display value following OpenAI pattern
                    if len(value) <= 8:
                        # For short passwords, show all as asterisks
                        display_value = '*' * len(value)
                    else:
                        # Show first 3 chars + 3 dots + last 4 chars (OpenAI pattern)
                        first_chars = value[:3]
                        last_chars = value[-4:]
                        display_value = f"{first_chars}...{last_chars}"
                    # It seems rich renders the cursor up as plain text not a control sequence,
                    # so when we combine the cursor up and re-print, console prints extra "[1A" unexpectedly.
                    # To avoid that, we use a workaround by printing the cursor up separately.
                    print("\033[1A", end='')
                    console.print(f"{prompt}{display_value}")
                else:
                    value = console.input(prompt, password=False)

                # Empty input falls back to the declared default, if any.
                if not value and default is not None:
                    value = default

                value = value.strip()
                if value == "/exit":
                    raise SystemExit(0)
                if validator(value):
                    params[param] = value
                    break
                console.print(
                    f"Invalid value for {prompt_name}. Please try again, or type '/exit' to exit.",
                    style=f"{ERROR_COLOR}")

        return params

    def validate_params(self, params: dict) -> bool:
        """Validate parameters from a provided config file against the schema.

        :param params: mapping of parameter name -> value to check.
        :raises ValueError: when a schema parameter is missing or fails its
            validator.
        :return: True when every schema parameter is present and valid.
        """
        schema = self.parameter_schema
        for param, meta in schema.items():
            if param not in params:
                raise ValueError(f"Missing required parameter: {param}")
            validator: Callable[[str], bool] = meta.get(
                "validator", lambda x: True)
            if not validator(params[param]):
                raise ValueError(f"Invalid value for parameter: {param}")
        return True

    @abstractmethod
    def validate_connection(self, params: dict) -> Tuple[str, str]:
        """
        Validate connection to the model endpoint using provided parameters.
        Returns a tuple of (error: str | None, action: str)
        where error is None if validation is successful, otherwise contains the error message.
        Action can be "retry_input", "connection_error", or "save".
        """
        raise NotImplementedError()
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_providers/openai_provider.py b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_providers/openai_provider.py
new file mode 100644
index 00000000000..9d1ffcd00bf
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/llm_providers/openai_provider.py
@@ -0,0 +1,63 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+
+from typing import Tuple
+
+from openai import OpenAI
+
+from .base import LLMProvider, non_empty
+
+
class OpenAIProvider(LLMProvider):
    """LLM provider backed by the public OpenAI API.

    Declares the model names and API key parameters and verifies each model
    with a minimal test request.
    """

    @property
    def readable_name(self) -> str:
        """Human-readable display name shown in prompts."""
        return "OpenAI"

    @property
    def name(self) -> str:
        """OpenClaw provider identifier."""
        return "openai"

    @property
    def parameter_schema(self):
        """Parameter schema consumed by ``LLMProvider.prompt_params``."""
        return {
            "models": {
                "secret": False,
                "default": "gpt-5",
                "hint": "comma-separated model names, e.g., gpt-5.4,gpt-5.1",
                "validator": non_empty
            },
            "api_key": {
                "secret": True,
                "default": None,
                "hint": None,
                "validator": non_empty
            },
        }

    def validate_connection(self, params: dict) -> Tuple[str, str]:
        """Verify that every configured model is reachable with the given key.

        :param params: mapping containing "api_key" and "models".
        :return: ``(error, action)`` tuple — ``error`` is ``None`` on success,
            ``action`` is one of "retry_input", "connection_error", "save".
        """
        api_key = params.get("api_key")
        models_str = params.get("models")
        if not all([api_key, models_str]):
            return "Missing required OpenAI parameters.", "retry_input"

        # Drop empty entries so input like "gpt-5,," does not trigger a request
        # with an empty model name, and ", ," does not skip validation
        # entirely and incorrectly report success.
        models = [m.strip() for m in models_str.split(",") if m.strip()]
        if not models:
            return "Missing required OpenAI parameters.", "retry_input"
        client = OpenAI(api_key=api_key)

        for model_name in models:
            try:
                # Cheap "ping" request proving the key is valid and the model
                # name exists.
                client.responses.create(
                    model=model_name,
                    instructions="You are a helpful assistant.",
                    input="ping",
                    timeout=10
                )
            except Exception as e:  # pylint: disable=broad-exception-caught
                error_str = str(e).lower()
                # Heuristic triage: configuration-looking failures send the
                # user back to input; everything else is a connectivity issue.
                if any(x in error_str for x in ["api key", "authentication", "unauthorized",
                                                "invalid", "bad request"]):
                    return f"Model '{model_name}' validation failed: {e}", "retry_input"
                return f"Model '{model_name}' connection error: {e}", "connection_error"
        return None, "save"
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/telemetry.py b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/telemetry.py
new file mode 100644
index 00000000000..23ab6ec9f57
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/sreclaw/telemetry.py
@@ -0,0 +1,100 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+import datetime
+import logging
+import os
+import platform
+
+from applicationinsights import TelemetryClient
+from azure.cli.core.telemetry import (
+ _get_azure_subscription_id,
+ _get_hash_mac_address,
+ _get_user_agent,
+)
+
# Fallback Application Insights key used when the env var below is unset.
DEFAULT_INSTRUMENTATION_KEY = "c301e561-daea-42d9-b9d1-65fca4166704"
APPLICATIONINSIGHTS_INSTRUMENTATION_KEY_ENV = "APPLICATIONINSIGHTS_INSTRUMENTATION_KEY"


def _utc_now():
    """Return a naive UTC timestamp.

    Replaces the deprecated ``datetime.utcnow()`` while keeping the same
    naive (tz-less) string representation in emitted telemetry.
    """
    return datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)


class CLITelemetryClient:
    """Context-manager telemetry client for the aks-sreclaw extension.

    Records a start timestamp on construction, then on exit emits one event
    ("create", "cleanup", or the default "startup") with start/end times and
    environment metadata, and flushes the underlying channel.
    """

    def __init__(self, event_type="startup"):
        instrumentation_key = self._get_application_insights_instrumentation_key()
        self._telemetry_client = TelemetryClient(
            instrumentation_key=instrumentation_key
        )
        self.start_time = _utc_now()
        self.end_time = ""  # populated on __exit__
        self.event_type = event_type

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.end_time = _utc_now()
        # Dispatch on the event type chosen at construction time.
        if self.event_type == "create":
            self.track_agent_create()
        elif self.event_type == "cleanup":
            self.track_agent_cleanup()
        else:  # default to startup
            self.track_agent_started()
        self.flush()

    def track(self, event_name, properties=None):
        """Send a trace event, merging in the common environment payload."""
        if properties is None:
            properties = {}
        properties.update(self._generate_payload())
        self._telemetry_client.track_trace(event_name, properties, logging.INFO)

    def _timestamp_properties(self):
        """Shared start/end timestamp properties for the track_agent_* events."""
        return {
            "time.start": str(self.start_time),
            "time.end": str(self.end_time),
        }

    def track_agent_started(self):
        self.track("AgentCLIStartup", properties=self._timestamp_properties())

    def track_agent_create(self):
        self.track("AgentCLICreate", properties=self._timestamp_properties())

    def track_agent_cleanup(self):
        self.track("AgentCLICleanup", properties=self._timestamp_properties())

    def flush(self):
        """Flush any buffered telemetry to the backend."""
        self._telemetry_client.flush()

    def _generate_payload(self):
        """Build the common property payload attached to every event."""
        extension_name = "aks-sreclaw"
        try:
            from azure.cli.core.extension import get_extension

            ext_name = "aks-sreclaw"
            ext = get_extension(ext_name)
            extension_name = f"aks-sreclaw@{ext.version}"
        except Exception:  # pylint: disable=broad-exception-caught
            # Best-effort version lookup: telemetry must never crash the CLI,
            # so fall back to the bare extension name on any failure.
            pass

        return {
            "device.id": _get_hash_mac_address(),
            "service.name": "aks sreclaw",
            "userAzureSubscriptionId": _get_azure_subscription_id(),
            "OS.Type": platform.system().lower(),  # eg. darwin, windows
            "OS.Version": platform.version().lower(),  # eg. 10.0.14942
            "OS.Platform": platform.platform().lower(),  # eg. windows-10-10.0.19041-sp0
            "userAgent": _get_user_agent(),
            "extensionname": extension_name,  # extension and version
        }

    def _get_application_insights_instrumentation_key(self) -> str:
        """Return the env-var override, or the built-in default key."""
        return os.getenv(
            APPLICATIONINSIGHTS_INSTRUMENTATION_KEY_ENV, DEFAULT_INSTRUMENTATION_KEY
        )
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/__init__.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/__init__.py
new file mode 100644
index 00000000000..34913fb394d
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/__init__.py
@@ -0,0 +1,4 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/__init__.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/__init__.py
new file mode 100644
index 00000000000..999067d049c
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/__init__.py
@@ -0,0 +1,32 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
from typing import TYPE_CHECKING

# Make hand-written customizations visible to static type checkers only;
# the runtime import happens in the try block below.
if TYPE_CHECKING:
    from ._patch import *  # pylint: disable=unused-wildcard-import

from ._container_service_client import ContainerServiceClient  # type: ignore
from ._version import VERSION

__version__ = VERSION

# Merge any hand-written customizations from _patch.py into this package's
# public surface; a package without customizations simply has no extra names.
try:
    from ._patch import __all__ as _patch_all
    from ._patch import *
except ImportError:
    _patch_all = []
from ._patch import patch_sdk as _patch_sdk

__all__ = [
    "ContainerServiceClient",
]
# Re-export customized names without duplicating the generated ones.
__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore

# Give _patch.py a last chance to monkey-patch the generated SDK.
_patch_sdk()
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_configuration.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_configuration.py
new file mode 100644
index 00000000000..2a018b2cb81
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_configuration.py
@@ -0,0 +1,75 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, Optional, TYPE_CHECKING
+
+from azure.core.pipeline import policies
+from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
+
+from ._version import VERSION
+
+if TYPE_CHECKING:
+ from azure.core import AzureClouds
+ from azure.core.credentials import TokenCredential
+
+
class ContainerServiceClientConfiguration:  # pylint: disable=too-many-instance-attributes
    """Configuration for ContainerServiceClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The ID of the target subscription. The value must be an UUID. Required.
    :type subscription_id: str
    :param cloud_setting: The cloud setting for which to get the ARM endpoint. Default value is
    None.
    :type cloud_setting: ~azure.core.AzureClouds
    :keyword api_version: Api Version. Default value is "2025-10-01". Note that overriding this
    default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(
        self,
        credential: "TokenCredential",
        subscription_id: str,
        cloud_setting: Optional["AzureClouds"] = None,
        **kwargs: Any
    ) -> None:
        # Service API version pinned by this generated package.
        api_version: str = kwargs.pop("api_version", "2025-10-01")

        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")

        self.credential = credential
        self.subscription_id = subscription_id
        self.cloud_setting = cloud_setting
        self.api_version = api_version
        # Default scope for public Azure; the client overrides this with
        # cloud-specific scopes resolved from the active cloud setting.
        self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
        kwargs.setdefault("sdk_moniker", "mgmt-containerservice/{}".format(VERSION))
        # Default poll interval (seconds) for long-running operations.
        self.polling_interval = kwargs.get("polling_interval", 30)
        self._configure(**kwargs)

    def _configure(self, **kwargs: Any) -> None:
        # Build the standard pipeline policies, honoring caller-supplied
        # overrides passed via kwargs.
        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs)
        self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
        self.authentication_policy = kwargs.get("authentication_policy")
        # Default auth policy handles ARM CAE/claims challenges for the
        # configured credential scopes.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = ARMChallengeAuthenticationPolicy(
                self.credential, *self.credential_scopes, **kwargs
            )
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_container_service_client.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_container_service_client.py
new file mode 100644
index 00000000000..b5b52bb4815
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_container_service_client.py
@@ -0,0 +1,197 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from copy import deepcopy
+from typing import Any, Optional, TYPE_CHECKING, cast
+from typing_extensions import Self
+
+from azure.core.pipeline import policies
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.settings import settings
+from azure.mgmt.core import ARMPipelineClient
+from azure.mgmt.core.policies import ARMAutoResourceProviderRegistrationPolicy
+from azure.mgmt.core.tools import get_arm_endpoints
+
+from . import models as _models
+from ._configuration import ContainerServiceClientConfiguration
+from ._utils.serialization import Deserializer, Serializer
+from .operations import (
+ AgentPoolsOperations,
+ MachinesOperations,
+ MaintenanceConfigurationsOperations,
+ ManagedClustersOperations,
+ ManagedNamespacesOperations,
+ Operations,
+ PrivateEndpointConnectionsOperations,
+ PrivateLinkResourcesOperations,
+ ResolvePrivateLinkServiceIdOperations,
+ SnapshotsOperations,
+ TrustedAccessRoleBindingsOperations,
+ TrustedAccessRolesOperations,
+)
+
+if TYPE_CHECKING:
+ from azure.core import AzureClouds
+ from azure.core.credentials import TokenCredential
+
+
class ContainerServiceClient:  # pylint: disable=too-many-instance-attributes
    """The Container Service Client.

    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.containerservice.operations.Operations
    :ivar managed_clusters: ManagedClustersOperations operations
    :vartype managed_clusters: azure.mgmt.containerservice.operations.ManagedClustersOperations
    :ivar maintenance_configurations: MaintenanceConfigurationsOperations operations
    :vartype maintenance_configurations:
     azure.mgmt.containerservice.operations.MaintenanceConfigurationsOperations
    :ivar managed_namespaces: ManagedNamespacesOperations operations
    :vartype managed_namespaces: azure.mgmt.containerservice.operations.ManagedNamespacesOperations
    :ivar agent_pools: AgentPoolsOperations operations
    :vartype agent_pools: azure.mgmt.containerservice.operations.AgentPoolsOperations
    :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
    :vartype private_endpoint_connections:
     azure.mgmt.containerservice.operations.PrivateEndpointConnectionsOperations
    :ivar private_link_resources: PrivateLinkResourcesOperations operations
    :vartype private_link_resources:
     azure.mgmt.containerservice.operations.PrivateLinkResourcesOperations
    :ivar resolve_private_link_service_id: ResolvePrivateLinkServiceIdOperations operations
    :vartype resolve_private_link_service_id:
     azure.mgmt.containerservice.operations.ResolvePrivateLinkServiceIdOperations
    :ivar snapshots: SnapshotsOperations operations
    :vartype snapshots: azure.mgmt.containerservice.operations.SnapshotsOperations
    :ivar trusted_access_role_bindings: TrustedAccessRoleBindingsOperations operations
    :vartype trusted_access_role_bindings:
     azure.mgmt.containerservice.operations.TrustedAccessRoleBindingsOperations
    :ivar trusted_access_roles: TrustedAccessRolesOperations operations
    :vartype trusted_access_roles:
     azure.mgmt.containerservice.operations.TrustedAccessRolesOperations
    :ivar machines: MachinesOperations operations
    :vartype machines: azure.mgmt.containerservice.operations.MachinesOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The ID of the target subscription. The value must be an UUID. Required.
    :type subscription_id: str
    :param base_url: Service URL. Default value is None.
    :type base_url: str
    :keyword cloud_setting: The cloud setting for which to get the ARM endpoint. Default value is
     None.
    :paramtype cloud_setting: ~azure.core.AzureClouds
    :keyword api_version: Api Version. Default value is "2025-10-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """

    def __init__(
        self,
        credential: "TokenCredential",
        subscription_id: str,
        base_url: Optional[str] = None,
        *,
        cloud_setting: Optional["AzureClouds"] = None,
        **kwargs: Any
    ) -> None:
        # Resolve the ARM endpoint and credential scopes for the active cloud
        # (public Azure, government, etc.) unless explicitly overridden.
        _cloud = cloud_setting or settings.current.azure_cloud  # type: ignore
        _endpoints = get_arm_endpoints(_cloud)
        if not base_url:
            base_url = _endpoints["resource_manager"]
        credential_scopes = kwargs.pop("credential_scopes", _endpoints["credential_scopes"])
        self._config = ContainerServiceClientConfiguration(
            credential=credential,
            subscription_id=subscription_id,
            cloud_setting=cloud_setting,
            credential_scopes=credential_scopes,
            **kwargs
        )

        # Build the default policy chain; a caller-supplied "policies" kwarg
        # replaces it wholesale. NOTE(review): the order is significant and
        # matches the standard generated ARM pipeline.
        _policies = kwargs.pop("policies", None)
        if _policies is None:
            _policies = [
                policies.RequestIdPolicy(**kwargs),
                self._config.headers_policy,
                self._config.user_agent_policy,
                self._config.proxy_policy,
                policies.ContentDecodePolicy(**kwargs),
                ARMAutoResourceProviderRegistrationPolicy(),
                self._config.redirect_policy,
                self._config.retry_policy,
                self._config.authentication_policy,
                self._config.custom_hook_policy,
                self._config.logging_policy,
                policies.DistributedTracingPolicy(**kwargs),
                policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None,
                self._config.http_logging_policy,
            ]
        self._client: ARMPipelineClient = ARMPipelineClient(base_url=cast(str, base_url), policies=_policies, **kwargs)

        # Register every model class with the (de)serializer; client-side
        # validation is disabled so the service is the source of truth.
        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self._serialize.client_side_validation = False
        # Instantiate one operation group per service area, all sharing the
        # same pipeline and (de)serializers.
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.managed_clusters = ManagedClustersOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.maintenance_configurations = MaintenanceConfigurationsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.managed_namespaces = ManagedNamespacesOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.agent_pools = AgentPoolsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.private_link_resources = PrivateLinkResourcesOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.resolve_private_link_service_id = ResolvePrivateLinkServiceIdOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.snapshots = SnapshotsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.trusted_access_role_bindings = TrustedAccessRoleBindingsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.trusted_access_roles = TrustedAccessRolesOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.machines = MachinesOperations(self._client, self._config, self._serialize, self._deserialize)

    def _send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = client._send_request(request)
        <HttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.HttpResponse
        """

        # Copy so the caller's request object is not mutated by URL formatting.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, stream=stream, **kwargs)  # type: ignore

    def close(self) -> None:
        """Close the underlying pipeline transport."""
        self._client.close()

    def __enter__(self) -> Self:
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details: Any) -> None:
        self._client.__exit__(*exc_details)
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_patch.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_patch.py
new file mode 100644
index 00000000000..8bcb627aa47
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_patch.py
@@ -0,0 +1,21 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
+
+
def patch_sdk():
    """Post-import hook for SDK customizations. Do not remove from this file.

    `patch_sdk` is a last resort escape hatch that allows you to do
    customizations you can't accomplish using the techniques described in
    https://aka.ms/azsdk/python/dpcodegen/python/customize
    """
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_utils/__init__.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_utils/__init__.py
new file mode 100644
index 00000000000..0af9b28f660
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_utils/__init__.py
@@ -0,0 +1,6 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_utils/serialization.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_utils/serialization.py
new file mode 100644
index 00000000000..ff543ed937f
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_utils/serialization.py
@@ -0,0 +1,2030 @@
+# pylint: disable=line-too-long,useless-suppression,too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+# pyright: reportUnnecessaryTypeIgnoreComment=false
+
+from base64 import b64decode, b64encode
+import calendar
+import datetime
+import decimal
+import email
+from enum import Enum
+import json
+import logging
+import re
+import sys
+import codecs
+from typing import (
+ Any,
+ cast,
+ Optional,
+ Union,
+ AnyStr,
+ IO,
+ Mapping,
+ Callable,
+ MutableMapping,
+)
+
+try:
+ from urllib import quote # type: ignore
+except ImportError:
+ from urllib.parse import quote
+import xml.etree.ElementTree as ET
+
+import isodate # type: ignore
+from typing_extensions import Self
+
+from azure.core.exceptions import DeserializationError, SerializationError
+from azure.core.serialization import NULL as CoreNull
+
+_BOM = codecs.BOM_UTF8.decode(encoding="utf-8")
+
+JSON = MutableMapping[str, Any]
+
+
+class RawDeserializer:
+    """Deserialize raw HTTP payloads (bytes, text or streams) based on content type."""
+
+    # Accept "text" because we're open minded people...
+    JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$")
+
+    # Name used in context
+    CONTEXT_NAME = "deserialized_data"
+
+    @classmethod
+    def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any:
+        """Decode data according to content-type.
+
+        Accept a stream of data as well, but will be load at once in memory for now.
+
+        If no content-type, will return the string version (not bytes, not stream)
+
+        :param data: Input, could be bytes or stream (will be decoded with UTF8) or text
+        :type data: str or bytes or IO
+        :param str content_type: The content type.
+        :return: The deserialized data.
+        :rtype: object
+        """
+        if hasattr(data, "read"):
+            # Assume a stream
+            data = cast(IO, data).read()
+
+        if isinstance(data, bytes):
+            # "utf-8-sig" transparently strips a UTF-8 BOM if one is present.
+            data_as_str = data.decode(encoding="utf-8-sig")
+        else:
+            # Explain to mypy the correct type.
+            data_as_str = cast(str, data)
+
+        # Remove Byte Order Mark if present in string
+        data_as_str = data_as_str.lstrip(_BOM)
+
+        if content_type is None:
+            return data
+
+        if cls.JSON_REGEXP.match(content_type):
+            try:
+                return json.loads(data_as_str)
+            except ValueError as err:
+                raise DeserializationError("JSON is invalid: {}".format(err), err) from err
+        # NOTE(review): substring match — any content type containing "xml"
+        # (e.g. "application/atom+xml") takes this branch.
+        elif "xml" in (content_type or []):
+            try:
+
+                try:
+                    if isinstance(data, unicode):  # type: ignore
+                        # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string
+                        data_as_str = data_as_str.encode(encoding="utf-8")  # type: ignore
+                except NameError:
+                    pass
+
+                return ET.fromstring(data_as_str)  # nosec
+            except ET.ParseError as err:
+                # It might be because the server has an issue, and returned JSON with
+                # content-type XML....
+                # So let's try a JSON load, and if it's still broken
+                # let's flow the initial exception
+                def _json_attemp(data):
+                    try:
+                        return True, json.loads(data)
+                    except ValueError:
+                        return False, None  # Don't care about this one
+
+                success, json_result = _json_attemp(data)
+                if success:
+                    return json_result
+                # If i'm here, it's not JSON, it's not XML, let's scream
+                # and raise the last context in this block (the XML exception)
+                # The function hack is because Py2.7 messes up with exception
+                # context otherwise.
+                _LOGGER.critical("Wasn't XML not JSON, failing")
+                raise DeserializationError("XML is invalid") from err
+        elif content_type.startswith("text/"):
+            return data_as_str
+        raise DeserializationError("Cannot deserialize content-type: {}".format(content_type))
+
+    @classmethod
+    def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any:
+        """Deserialize from HTTP response.
+
+        Use bytes and headers to NOT use any requests/aiohttp or whatever
+        specific implementation.
+        Headers will be tested for "content-type"
+
+        :param bytes body_bytes: The body of the response.
+        :param dict headers: The headers of the response.
+        :returns: The deserialized data.
+        :rtype: object
+        """
+        # Try to use content-type from headers if available
+        content_type = None
+        if "content-type" in headers:
+            # Strip parameters such as "; charset=utf-8" and normalize case.
+            content_type = headers["content-type"].split(";")[0].strip().lower()
+        # Ouch, this server did not declare what it sent...
+        # Let's guess it's JSON...
+        # Also, since Autorest was considering that an empty body was a valid JSON,
+        # need that test as well....
+        else:
+            content_type = "application/json"
+
+        if body_bytes:
+            return cls.deserialize_from_text(body_bytes, content_type)
+        return None
+
+
+_LOGGER = logging.getLogger(__name__)
+
+# Python 2 had a separate ``long`` type; on Python 3 it aliases ``int``.
+try:
+    _long_type = long  # type: ignore
+except NameError:
+    _long_type = int
+
+# Canonical UTC tzinfo used throughout (de)serialization.
+TZ_UTC = datetime.timezone.utc
+
+_FLATTEN = re.compile(r"(? None:
+ self.additional_properties: Optional[dict[str, Any]] = {}
+ for k in kwargs: # pylint: disable=consider-using-dict-items
+ if k not in self._attribute_map:
+ _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__)
+ elif k in self._validation and self._validation[k].get("readonly", False):
+ _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__)
+ else:
+ setattr(self, k, kwargs[k])
+
+ def __eq__(self, other: Any) -> bool:
+ """Compare objects by comparing all attributes.
+
+ :param object other: The object to compare
+ :returns: True if objects are equal
+ :rtype: bool
+ """
+ if isinstance(other, self.__class__):
+ return self.__dict__ == other.__dict__
+ return False
+
+ def __ne__(self, other: Any) -> bool:
+ """Compare objects by comparing all attributes.
+
+ :param object other: The object to compare
+ :returns: True if objects are not equal
+ :rtype: bool
+ """
+ return not self.__eq__(other)
+
+    def __str__(self) -> str:
+        # Debug-friendly rendering: the instance's attribute dictionary.
+        return str(self.__dict__)
+
+    @classmethod
+    def enable_additional_properties_sending(cls) -> None:
+        # Opt this model class into serializing its ``additional_properties``
+        # bag: the empty "key" merges the dict at the top level of the payload.
+        cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"}
+
+    @classmethod
+    def is_xml_model(cls) -> bool:
+        """Return True when this model class declares an ``_xml_map``.
+
+        :rtype: bool
+        """
+        try:
+            cls._xml_map  # type: ignore
+        except AttributeError:
+            return False
+        return True
+
+    @classmethod
+    def _create_xml_node(cls):
+        """Create XML node.
+
+        :returns: The XML node
+        :rtype: xml.etree.ElementTree.Element
+        """
+        try:
+            xml_map = cls._xml_map  # type: ignore
+        except AttributeError:
+            # Not an XML model: fall back to defaults (tag = class name).
+            xml_map = {}
+
+        return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None))
+
+    def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON:
+        """Return the JSON that would be sent to server from this model.
+
+        This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`.
+
+        If you want XML serialization, you can pass the kwargs is_xml=True.
+
+        :param bool keep_readonly: If you want to serialize the readonly attributes
+        :returns: A dict JSON compatible object
+        :rtype: dict
+        """
+        # A fresh Serializer is built per call from the classes of this model's module.
+        serializer = Serializer(self._infer_class_models())
+        return serializer._serialize(  # type: ignore # pylint: disable=protected-access
+            self, keep_readonly=keep_readonly, **kwargs
+        )
+
+    def as_dict(
+        self,
+        keep_readonly: bool = True,
+        key_transformer: Callable[[str, dict[str, Any], Any], Any] = attribute_transformer,
+        **kwargs: Any
+    ) -> JSON:
+        """Return a dict that can be serialized using json.dump.
+
+        Advanced usage might optionally use a callback as parameter:
+
+        .. code::python
+
+            def my_key_transformer(key, attr_desc, value):
+                return key
+
+        Key is the attribute name used in Python. Attr_desc
+        is a dict of metadata. Currently contains 'type' with the
+        msrest type and 'key' with the RestAPI encoded key.
+        Value is the current value in this object.
+
+        The string returned will be used to serialize the key.
+        If the return type is a list, this is considered hierarchical
+        result dict.
+
+        See the three examples in this file:
+
+        - attribute_transformer
+        - full_restapi_key_transformer
+        - last_restapi_key_transformer
+
+        If you want XML serialization, you can pass the kwargs is_xml=True.
+
+        :param bool keep_readonly: If you want to serialize the readonly attributes
+        :param function key_transformer: A key transformer function.
+        :returns: A dict JSON compatible object
+        :rtype: dict
+        """
+        # Same machinery as serialize(), but keeps read-only attributes by
+        # default and lets the caller choose how keys are rendered.
+        serializer = Serializer(self._infer_class_models())
+        return serializer._serialize(  # type: ignore # pylint: disable=protected-access
+            self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs
+        )
+
+    @classmethod
+    def _infer_class_models(cls):
+        """Build the ``{name: class}`` dependency map from this class's module.
+
+        Falls back to just this class when the module does not look like
+        Autorest-generated code (e.g. under test).
+
+        :rtype: dict
+        """
+        try:
+            str_models = cls.__module__.rsplit(".", 1)[0]
+            models = sys.modules[str_models]
+            client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+            if cls.__name__ not in client_models:
+                raise ValueError("Not Autorest generated code")
+        except Exception:  # pylint: disable=broad-exception-caught
+            # Assume it's not Autorest generated (tests?). Add ourselves as dependencies.
+            client_models = {cls.__name__: cls}
+        return client_models
+
+    @classmethod
+    def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Self:
+        """Parse a str using the RestAPI syntax and return a model.
+
+        :param str data: A str using RestAPI structure. JSON by default.
+        :param str content_type: JSON by default, set application/xml if XML.
+        :returns: An instance of this model
+        :raises DeserializationError: if something went wrong
+        :rtype: Self
+        """
+        # Deserializer.__call__ resolves the target model by class name.
+        deserializer = Deserializer(cls._infer_class_models())
+        return deserializer(cls.__name__, data, content_type=content_type)  # type: ignore
+
+    @classmethod
+    def from_dict(
+        cls,
+        data: Any,
+        key_extractors: Optional[Callable[[str, dict[str, Any], Any], Any]] = None,
+        content_type: Optional[str] = None,
+    ) -> Self:
+        """Parse a dict using given key extractor return a model.
+
+        By default consider key
+        extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor
+        and last_rest_key_case_insensitive_extractor)
+
+        :param dict data: A dict using RestAPI structure
+        :param function key_extractors: A key extractor function.
+        :param str content_type: JSON by default, set application/xml if XML.
+        :returns: An instance of this model
+        :raises DeserializationError: if something went wrong
+        :rtype: Self
+        """
+        deserializer = Deserializer(cls._infer_class_models())
+        # Default to case-insensitive extraction so dicts with arbitrary key
+        # casing still map onto model attributes.
+        deserializer.key_extractors = (  # type: ignore
+            [  # type: ignore
+                attribute_key_case_insensitive_extractor,
+                rest_key_case_insensitive_extractor,
+                last_rest_key_case_insensitive_extractor,
+            ]
+            if key_extractors is None
+            else key_extractors
+        )
+        return deserializer(cls.__name__, data, content_type=content_type)  # type: ignore
+
+    @classmethod
+    def _flatten_subtype(cls, key, objects):
+        """Recursively merge the discriminator maps of this class and its subtypes.
+
+        :param str key: Discriminator key in ``_subtype_map``.
+        :param dict objects: Known model classes, by name.
+        :rtype: dict
+        """
+        if "_subtype_map" not in cls.__dict__:
+            return {}
+        result = dict(cls._subtype_map[key])
+        for valuetype in cls._subtype_map[key].values():
+            result |= objects[valuetype]._flatten_subtype(key, objects)  # pylint: disable=protected-access
+        return result
+
+    @classmethod
+    def _classify(cls, response, objects):
+        """Check the class _subtype_map for any child classes.
+        We want to ignore any inherited _subtype_maps.
+
+        :param dict response: The initial data
+        :param dict objects: The class objects
+        :returns: The class to be used
+        :rtype: class
+        """
+        for subtype_key in cls.__dict__.get("_subtype_map", {}).keys():
+            subtype_value = None
+
+            if not isinstance(response, ET.Element):
+                rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1]
+                subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None)
+            else:
+                subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response)
+            if subtype_value:
+                # Try to match base class. Can be class name only
+                # (bug to fix in Autorest to support x-ms-discriminator-name)
+                if cls.__name__ == subtype_value:
+                    return cls
+                flatten_mapping_type = cls._flatten_subtype(subtype_key, objects)
+                try:
+                    return objects[flatten_mapping_type[subtype_value]]  # type: ignore
+                except KeyError:
+                    # Unknown discriminator value: warn and fall back to the
+                    # base class instead of failing the deserialization.
+                    _LOGGER.warning(
+                        "Subtype value %s has no mapping, use base class %s.",
+                        subtype_value,
+                        cls.__name__,
+                    )
+                    break
+            else:
+                _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__)
+                break
+        return cls
+
+    @classmethod
+    def _get_rest_key_parts(cls, attr_key):
+        """Get the RestAPI key of this attr, split it and decode part
+        :param str attr_key: Attribute key must be in attribute_map.
+        :returns: A list of RestAPI part
+        :rtype: list
+        """
+        # Split on unescaped dots only, then unescape each component.
+        rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"])
+        return [_decode_attribute_map_key(key_part) for key_part in rest_split_key]
+
+
+def _decode_attribute_map_key(key):
+    """This decode a key in an _attribute_map to the actual key we want to look at
+    inside the received data.
+
+    :param str key: A key string from the generated code
+    :returns: The decoded key
+    :rtype: str
+    """
+    # Unescape literal dots that were escaped to survive _FLATTEN splitting.
+    return key.replace("\\.", ".")
+
+
+class Serializer:  # pylint: disable=too-many-public-methods
+    """Request object model serializer."""
+
+    # Python scalar type -> wire-format type name.
+    basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
+
+    # XML requires lowercase boolean literals ("true"/"false").
+    _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()}
+    days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"}
+    months = {
+        1: "Jan",
+        2: "Feb",
+        3: "Mar",
+        4: "Apr",
+        5: "May",
+        6: "Jun",
+        7: "Jul",
+        8: "Aug",
+        9: "Sep",
+        10: "Oct",
+        11: "Nov",
+        12: "Dec",
+    }
+    # Constraint name -> predicate that returns True when the value VIOLATES it.
+    validation = {
+        "min_length": lambda x, y: len(x) < y,
+        "max_length": lambda x, y: len(x) > y,
+        "minimum": lambda x, y: x < y,
+        "maximum": lambda x, y: x > y,
+        "minimum_ex": lambda x, y: x <= y,
+        "maximum_ex": lambda x, y: x >= y,
+        "min_items": lambda x, y: len(x) < y,
+        "max_items": lambda x, y: len(x) > y,
+        "pattern": lambda x, y: not re.match(y, x, re.UNICODE),
+        "unique": lambda x, y: len(x) != len(set(x)),
+        "multiple": lambda x, y: x % y != 0,
+    }
+
+    def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None:
+        """:param classes: Known model classes by name, used to resolve complex and enum types."""
+        # Dispatch table from wire-format type names to serializer callables.
+        self.serialize_type = {
+            "iso-8601": Serializer.serialize_iso,
+            "rfc-1123": Serializer.serialize_rfc,
+            "unix-time": Serializer.serialize_unix,
+            "duration": Serializer.serialize_duration,
+            "date": Serializer.serialize_date,
+            "time": Serializer.serialize_time,
+            "decimal": Serializer.serialize_decimal,
+            "long": Serializer.serialize_long,
+            "bytearray": Serializer.serialize_bytearray,
+            "base64": Serializer.serialize_base64,
+            "object": self.serialize_object,
+            "[]": self.serialize_iter,
+            "{}": self.serialize_dict,
+        }
+        self.dependencies: dict[str, type] = dict(classes) if classes else {}
+        self.key_transformer = full_restapi_key_transformer
+        self.client_side_validation = True
+
+    def _serialize(  # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals
+        self, target_obj, data_type=None, **kwargs
+    ):
+        """Serialize data into a string according to type.
+
+        :param object target_obj: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str, dict
+        :raises SerializationError: if serialization fails.
+        :returns: The serialized data.
+        """
+        key_transformer = kwargs.get("key_transformer", self.key_transformer)
+        keep_readonly = kwargs.get("keep_readonly", False)
+        if target_obj is None:
+            return None
+
+        attr_name = None
+        class_name = target_obj.__class__.__name__
+
+        # An explicit data_type short-circuits the Model-based attribute walk.
+        if data_type:
+            return self.serialize_data(target_obj, data_type, **kwargs)
+
+        if not hasattr(target_obj, "_attribute_map"):
+            data_type = type(target_obj).__name__
+            if data_type in self.basic_types.values():
+                return self.serialize_data(target_obj, data_type, **kwargs)
+
+        # Force "is_xml" kwargs if we detect a XML model
+        try:
+            is_xml_model_serialization = kwargs["is_xml"]
+        except KeyError:
+            is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model())
+
+        serialized = {}
+        if is_xml_model_serialization:
+            serialized = target_obj._create_xml_node()  # pylint: disable=protected-access
+        try:
+            attributes = target_obj._attribute_map  # pylint: disable=protected-access
+            for attr, attr_desc in attributes.items():
+                attr_name = attr
+                if not keep_readonly and target_obj._validation.get(  # pylint: disable=protected-access
+                    attr_name, {}
+                ).get("readonly", False):
+                    continue
+
+                if attr_name == "additional_properties" and attr_desc["key"] == "":
+                    if target_obj.additional_properties is not None:
+                        serialized |= target_obj.additional_properties
+                    continue
+                try:
+
+                    orig_attr = getattr(target_obj, attr)
+                    if is_xml_model_serialization:
+                        pass  # Don't provide "transformer" for XML for now. Keep "orig_attr"
+                    else:  # JSON
+                        keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr)
+                        keys = keys if isinstance(keys, list) else [keys]
+
+                    kwargs["serialization_ctxt"] = attr_desc
+                    new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs)
+
+                    if is_xml_model_serialization:
+                        xml_desc = attr_desc.get("xml", {})
+                        xml_name = xml_desc.get("name", attr_desc["key"])
+                        xml_prefix = xml_desc.get("prefix", None)
+                        xml_ns = xml_desc.get("ns", None)
+                        if xml_desc.get("attr", False):
+                            if xml_ns:
+                                ET.register_namespace(xml_prefix, xml_ns)
+                                xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+                            serialized.set(xml_name, new_attr)  # type: ignore
+                            continue
+                        if xml_desc.get("text", False):
+                            serialized.text = new_attr  # type: ignore
+                            continue
+                        if isinstance(new_attr, list):
+                            serialized.extend(new_attr)  # type: ignore
+                        elif isinstance(new_attr, ET.Element):
+                            # If the down XML has no XML/Name,
+                            # we MUST replace the tag with the local tag. But keeping the namespaces.
+                            if "name" not in getattr(orig_attr, "_xml_map", {}):
+                                splitted_tag = new_attr.tag.split("}")
+                                if len(splitted_tag) == 2:  # Namespace
+                                    new_attr.tag = "}".join([splitted_tag[0], xml_name])
+                                else:
+                                    new_attr.tag = xml_name
+                            serialized.append(new_attr)  # type: ignore
+                        else:  # That's a basic type
+                            # Integrate namespace if necessary
+                            local_node = _create_xml_node(xml_name, xml_prefix, xml_ns)
+                            local_node.text = str(new_attr)
+                            serialized.append(local_node)  # type: ignore
+                    else:  # JSON
+                        # Build the nested dict hierarchy from the key path
+                        # (flattened keys like "properties.name" become nesting).
+                        for k in reversed(keys):  # type: ignore
+                            new_attr = {k: new_attr}
+
+                        _new_attr = new_attr
+                        _serialized = serialized
+                        for k in keys:  # type: ignore
+                            if k not in _serialized:
+                                _serialized.update(_new_attr)  # type: ignore
+                            _new_attr = _new_attr[k]  # type: ignore
+                            _serialized = _serialized[k]
+                except ValueError as err:
+                    if isinstance(err, SerializationError):
+                        raise
+
+        except (AttributeError, KeyError, TypeError) as err:
+            msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj))
+            raise SerializationError(msg) from err
+        return serialized
+
+    def body(self, data, data_type, **kwargs):
+        """Serialize data intended for a request body.
+
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: dict
+        :raises SerializationError: if serialization fails.
+        :raises ValueError: if data is None
+        :returns: The serialized request body
+        """
+
+        # Just in case this is a dict
+        internal_data_type_str = data_type.strip("[]{}")
+        internal_data_type = self.dependencies.get(internal_data_type_str, None)
+        try:
+            is_xml_model_serialization = kwargs["is_xml"]
+        except KeyError:
+            if internal_data_type and issubclass(internal_data_type, Model):
+                is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model())
+            else:
+                is_xml_model_serialization = False
+        if internal_data_type and not isinstance(internal_data_type, Enum):
+            try:
+                # Round-trip through deserialization first so plain dict input
+                # is normalized into a Model instance before serializing.
+                deserializer = Deserializer(self.dependencies)
+                # Since it's on serialization, it's almost sure that format is not JSON REST
+                # We're not able to deal with additional properties for now.
+                deserializer.additional_properties_detection = False
+                if is_xml_model_serialization:
+                    deserializer.key_extractors = [  # type: ignore
+                        attribute_key_case_insensitive_extractor,
+                    ]
+                else:
+                    deserializer.key_extractors = [
+                        rest_key_case_insensitive_extractor,
+                        attribute_key_case_insensitive_extractor,
+                        last_rest_key_case_insensitive_extractor,
+                    ]
+                data = deserializer._deserialize(data_type, data)  # pylint: disable=protected-access
+            except DeserializationError as err:
+                raise SerializationError("Unable to build a model: " + str(err)) from err
+
+        return self._serialize(data, data_type, **kwargs)
+
+    def url(self, name, data, data_type, **kwargs):
+        """Serialize data intended for a URL path.
+
+        :param str name: The name of the URL path parameter.
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str
+        :returns: The serialized URL path
+        :raises TypeError: if serialization fails.
+        :raises ValueError: if data is None
+        """
+        try:
+            output = self.serialize_data(data, data_type, **kwargs)
+            if data_type == "bool":
+                # json.dumps renders lowercase "true"/"false" as REST expects.
+                output = json.dumps(output)
+
+            if kwargs.get("skip_quote") is True:
+                # Caller opted out of quoting; only escape braces, which would
+                # otherwise be interpreted as URL template placeholders.
+                output = str(output)
+                output = output.replace("{", quote("{")).replace("}", quote("}"))
+            else:
+                output = quote(str(output), safe="")
+        except SerializationError as exc:
+            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+        return output
+
+    def query(self, name, data, data_type, **kwargs):
+        """Serialize data intended for a URL query.
+
+        :param str name: The name of the query parameter.
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str, list
+        :raises TypeError: if serialization fails.
+        :raises ValueError: if data is None
+        :returns: The serialized query parameter
+        """
+        try:
+            # Treat the list aside, since we don't want to encode the div separator
+            if data_type.startswith("["):
+                internal_data_type = data_type[1:-1]
+                do_quote = not kwargs.get("skip_quote", False)
+                return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs)
+
+            # Not a list, regular serialization
+            output = self.serialize_data(data, data_type, **kwargs)
+            if data_type == "bool":
+                # json.dumps renders lowercase "true"/"false" as REST expects.
+                output = json.dumps(output)
+            if kwargs.get("skip_quote") is True:
+                output = str(output)
+            else:
+                output = quote(str(output), safe="")
+        except SerializationError as exc:
+            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+        return str(output)
+
+    def header(self, name, data, data_type, **kwargs):
+        """Serialize data intended for a request header.
+
+        :param str name: The name of the header.
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str
+        :raises TypeError: if serialization fails.
+        :raises ValueError: if data is None
+        :returns: The serialized header
+        """
+        try:
+            if data_type in ["[str]"]:
+                # Headers cannot carry None: render missing elements as "".
+                data = ["" if d is None else d for d in data]
+
+            output = self.serialize_data(data, data_type, **kwargs)
+            if data_type == "bool":
+                # json.dumps renders lowercase "true"/"false" as REST expects.
+                output = json.dumps(output)
+        except SerializationError as exc:
+            raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+        return str(output)
+
+    def serialize_data(self, data, data_type, **kwargs):
+        """Serialize generic data according to supplied data type.
+
+        :param object data: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :raises AttributeError: if required data is None.
+        :raises ValueError: if data is None
+        :raises SerializationError: if serialization fails.
+        :returns: The serialized data.
+        :rtype: str, int, float, bool, dict, list
+        """
+        if data is None:
+            raise ValueError("No value for given attribute")
+
+        try:
+            # CoreNull is the explicit "send JSON null" sentinel.
+            if data is CoreNull:
+                return None
+            if data_type in self.basic_types.values():
+                return self.serialize_basic(data, data_type, **kwargs)
+
+            if data_type in self.serialize_type:
+                return self.serialize_type[data_type](data, **kwargs)
+
+            # If dependencies is empty, try with current data class
+            # It has to be a subclass of Enum anyway
+            enum_type = self.dependencies.get(data_type, cast(type, data.__class__))
+            if issubclass(enum_type, Enum):
+                return Serializer.serialize_enum(data, enum_obj=enum_type)
+
+            # "[Foo]" / "{Foo}" collapse to the "[]" / "{}" dispatch entries.
+            iter_type = data_type[0] + data_type[-1]
+            if iter_type in self.serialize_type:
+                return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs)
+
+        except (ValueError, TypeError) as err:
+            msg = "Unable to serialize value: {!r} as type: {!r}."
+            raise SerializationError(msg.format(data, data_type)) from err
+        # Anything else is assumed to be a Model and handled by _serialize.
+        return self._serialize(data, **kwargs)
+
+    @classmethod
+    def _get_custom_serializers(cls, data_type, **kwargs):  # pylint: disable=inconsistent-return-statements
+        """Return a custom serializer callable for ``data_type``, if any.
+
+        Explicit ``basic_types_serializers`` take precedence over the XML
+        defaults; returns None implicitly when nothing applies.
+        """
+        custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type)
+        if custom_serializer:
+            return custom_serializer
+        if kwargs.get("is_xml", False):
+            return cls._xml_basic_types_serializers.get(data_type)
+
+    @classmethod
+    def serialize_basic(cls, data, data_type, **kwargs):
+        """Serialize basic builtin data type.
+        Serializes objects to str, int, float or bool.
+
+        Possible kwargs:
+        - basic_types_serializers dict[str, callable] : If set, use the callable as serializer
+        - is_xml bool : If set, use xml_basic_types_serializers
+
+        :param obj data: Object to be serialized.
+        :param str data_type: Type of object in the iterable.
+        :rtype: str, int, float, bool
+        :return: serialized object
+        """
+        custom_serializer = cls._get_custom_serializers(data_type, **kwargs)
+        if custom_serializer:
+            return custom_serializer(data)
+        if data_type == "str":
+            return cls.serialize_unicode(data)
+        # eval is bounded here: data_type is one of the basic_types values
+        # ("int", "bool", "float"), never caller-controlled text.
+        return eval(data_type)(data)  # nosec # pylint: disable=eval-used
+
+    @classmethod
+    def serialize_unicode(cls, data):
+        """Special handling for serializing unicode strings in Py2.
+        Encode to UTF-8 if unicode, otherwise handle as a str.
+
+        :param str data: Object to be serialized.
+        :rtype: str
+        :return: serialized object
+        """
+        try:  # If I received an enum, return its value
+            return data.value
+        except AttributeError:
+            pass
+
+        try:
+            if isinstance(data, unicode):  # type: ignore
+                # Don't change it, JSON and XML ElementTree are totally able
+                # to serialize correctly u'' strings
+                return data
+        except NameError:
+            # Python 3: "unicode" does not exist; every str is already unicode.
+            return str(data)
+        return str(data)
+
+    def serialize_iter(self, data, iter_type, div=None, **kwargs):
+        """Serialize iterable.
+
+        Supported kwargs:
+        - serialization_ctxt dict : The current entry of _attribute_map, or same format.
+          serialization_ctxt['type'] should be same as data_type.
+        - is_xml bool : If set, serialize as XML
+        - do_quote bool : Whether to URL-quote each serialized element.
+          Defaults to False.
+
+        :param list data: Object to be serialized.
+        :param str iter_type: Type of object in the iterable.
+        :param str div: If set, this str will be used to combine the elements
+         in the iterable into a combined string. Default is 'None'.
+        :rtype: list, str
+        :return: serialized iterable
+        """
+        if isinstance(data, str):
+            # A str is iterable but almost certainly a caller mistake here.
+            raise SerializationError("Refuse str type as a valid iter type.")
+
+        serialization_ctxt = kwargs.get("serialization_ctxt", {})
+        is_xml = kwargs.get("is_xml", False)
+
+        serialized = []
+        for d in data:
+            try:
+                serialized.append(self.serialize_data(d, iter_type, **kwargs))
+            except ValueError as err:
+                if isinstance(err, SerializationError):
+                    raise
+                # Unserializable element becomes None rather than failing the list.
+                serialized.append(None)
+
+        if kwargs.get("do_quote", False):
+            serialized = ["" if s is None else quote(str(s), safe="") for s in serialized]
+
+        if div:
+            serialized = ["" if s is None else str(s) for s in serialized]
+            serialized = div.join(serialized)
+
+        if "xml" in serialization_ctxt or is_xml:
+            # XML serialization is more complicated
+            xml_desc = serialization_ctxt.get("xml", {})
+            xml_name = xml_desc.get("name")
+            if not xml_name:
+                xml_name = serialization_ctxt["key"]
+
+            # Create a wrap node if necessary (use the fact that Element and list have "append")
+            is_wrapped = xml_desc.get("wrapped", False)
+            node_name = xml_desc.get("itemsName", xml_name)
+            if is_wrapped:
+                final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+            else:
+                final_result = []
+            # All list elements to "local_node"
+            for el in serialized:
+                if isinstance(el, ET.Element):
+                    el_node = el
+                else:
+                    el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+                    if el is not None:  # Otherwise it writes "None" :-p
+                        el_node.text = str(el)
+                final_result.append(el_node)
+            return final_result
+        return serialized
+
+    def serialize_dict(self, attr, dict_type, **kwargs):
+        """Serialize a dictionary of objects.
+
+        :param dict attr: Object to be serialized.
+        :param str dict_type: Type of object in the dictionary.
+        :rtype: dict
+        :return: serialized dictionary
+        """
+        serialization_ctxt = kwargs.get("serialization_ctxt", {})
+        serialized = {}
+        for key, value in attr.items():
+            try:
+                serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs)
+            except ValueError as err:
+                if isinstance(err, SerializationError):
+                    raise
+                # Unserializable value becomes None rather than failing the dict.
+                serialized[self.serialize_unicode(key)] = None
+
+        if "xml" in serialization_ctxt:
+            # XML serialization is more complicated
+            xml_desc = serialization_ctxt["xml"]
+            xml_name = xml_desc["name"]
+
+            final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+            for key, value in serialized.items():
+                ET.SubElement(final_result, key).text = value
+            return final_result
+
+        return serialized
+
+    def serialize_object(self, attr, **kwargs):  # pylint: disable=too-many-return-statements
+        """Serialize a generic object.
+        This will be handled as a dictionary. If object passed in is not
+        a basic type (str, int, float, dict, list) it will simply be
+        cast to str.
+
+        :param dict attr: Object to be serialized.
+        :rtype: dict or str
+        :return: serialized object
+        """
+        if attr is None:
+            return None
+        if isinstance(attr, ET.Element):
+            # Already-built XML nodes pass through untouched.
+            return attr
+        obj_type = type(attr)
+        if obj_type in self.basic_types:
+            return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs)
+        if obj_type is _long_type:
+            return self.serialize_long(attr)
+        if obj_type is str:
+            return self.serialize_unicode(attr)
+        if obj_type is datetime.datetime:
+            return self.serialize_iso(attr)
+        if obj_type is datetime.date:
+            return self.serialize_date(attr)
+        if obj_type is datetime.time:
+            return self.serialize_time(attr)
+        if obj_type is datetime.timedelta:
+            return self.serialize_duration(attr)
+        if obj_type is decimal.Decimal:
+            return self.serialize_decimal(attr)
+
+        # If it's a model or I know this dependency, serialize as a Model
+        if obj_type in self.dependencies.values() or isinstance(attr, Model):
+            return self._serialize(attr)
+
+        if obj_type == dict:
+            serialized = {}
+            for key, value in attr.items():
+                try:
+                    serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs)
+                except ValueError:
+                    serialized[self.serialize_unicode(key)] = None
+            return serialized
+
+        if obj_type == list:
+            serialized = []
+            for obj in attr:
+                try:
+                    serialized.append(self.serialize_object(obj, **kwargs))
+                except ValueError:
+                    # Skip elements that cannot be serialized.
+                    pass
+            return serialized
+        # Last resort: stringify unknown types.
+        return str(attr)
+
+    @staticmethod
+    def serialize_enum(attr, enum_obj=None):
+        """Serialize an enum member (or raw value) to its wire value.
+
+        Falls back to a case-insensitive match against the enum's values.
+
+        :param attr: Enum member or raw value to serialize.
+        :param enum_obj: Enum class used to validate the value.
+        :rtype: str
+        :raises SerializationError: if the value matches no enum member.
+        """
+        try:
+            result = attr.value
+        except AttributeError:
+            result = attr
+        try:
+            enum_obj(result)  # type: ignore
+            return result
+        except ValueError as exc:
+            for enum_value in enum_obj:  # type: ignore
+                if enum_value.value.lower() == str(attr).lower():
+                    return enum_value.value
+            error = "{!r} is not valid value for enum {!r}"
+            raise SerializationError(error.format(attr, enum_obj)) from exc
+
+ @staticmethod
+ def serialize_bytearray(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize bytearray into base-64 string.
+
+ :param str attr: Object to be serialized.
+ :rtype: str
+ :return: serialized base64
+ """
+ return b64encode(attr).decode()
+
+    @staticmethod
+    def serialize_base64(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize str into base-64 string.
+
+        Produces the URL-safe base64 variant: padding stripped, "+" -> "-",
+        "/" -> "_".
+
+        :param str attr: Object to be serialized.
+        :rtype: str
+        :return: serialized base64
+        """
+        encoded = b64encode(attr).decode("ascii")
+        return encoded.strip("=").replace("+", "-").replace("/", "_")
+
+ @staticmethod
+ def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize Decimal object to float.
+
+ :param decimal attr: Object to be serialized.
+ :rtype: float
+ :return: serialized decimal
+ """
+ return float(attr)
+
+ @staticmethod
+ def serialize_long(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize long (Py2) or int (Py3).
+
+ :param int attr: Object to be serialized.
+ :rtype: int/long
+ :return: serialized long
+ """
+ return _long_type(attr)
+
+ @staticmethod
+ def serialize_date(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize Date object into ISO-8601 formatted string.
+
+ :param Date attr: Object to be serialized.
+ :rtype: str
+ :return: serialized date
+ """
+ if isinstance(attr, str):
+ attr = isodate.parse_date(attr)
+ t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day)
+ return t
+
+ @staticmethod
+ def serialize_time(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize Time object into ISO-8601 formatted string.
+
+ :param datetime.time attr: Object to be serialized.
+ :rtype: str
+ :return: serialized time
+ """
+ if isinstance(attr, str):
+ attr = isodate.parse_time(attr)
+ t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second)
+ if attr.microsecond:
+ t += ".{:02}".format(attr.microsecond)
+ return t
+
    @staticmethod
    def serialize_duration(attr, **kwargs):  # pylint: disable=unused-argument
        """Serialize TimeDelta object into ISO-8601 formatted string.

        :param TimeDelta attr: Object to be serialized.
        :rtype: str
        :return: serialized duration
        """
        # A pre-formatted ISO-8601 string is round-tripped through isodate so
        # the output is normalized.
        if isinstance(attr, str):
            attr = isodate.parse_duration(attr)
        return isodate.duration_isoformat(attr)
+
+ @staticmethod
+ def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize Datetime object into RFC-1123 formatted string.
+
+ :param Datetime attr: Object to be serialized.
+ :rtype: str
+ :raises TypeError: if format invalid.
+ :return: serialized rfc
+ """
+ try:
+ if not attr.tzinfo:
+ _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+ utc = attr.utctimetuple()
+ except AttributeError as exc:
+ raise TypeError("RFC1123 object must be valid Datetime object.") from exc
+
+ return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format(
+ Serializer.days[utc.tm_wday],
+ utc.tm_mday,
+ Serializer.months[utc.tm_mon],
+ utc.tm_year,
+ utc.tm_hour,
+ utc.tm_min,
+ utc.tm_sec,
+ )
+
    @staticmethod
    def serialize_iso(attr, **kwargs):  # pylint: disable=unused-argument
        """Serialize Datetime object into ISO-8601 formatted string.

        :param Datetime attr: Object to be serialized.
        :rtype: str
        :raises SerializationError: if format invalid.
        :return: serialized iso
        """
        # Normalize pre-formatted strings by round-tripping through isodate.
        if isinstance(attr, str):
            attr = isodate.parse_datetime(attr)
        try:
            if not attr.tzinfo:
                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
            utc = attr.utctimetuple()
            if utc.tm_year > 9999 or utc.tm_year < 1:
                raise OverflowError("Hit max or min date")

            # Pad to 6 digits, trim trailing zeros, then re-pad to at least 3
            # digits — so 0 microseconds still renders as ".000".
            microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0")
            if microseconds:
                microseconds = "." + microseconds
            date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
                utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec
            )
            return date + microseconds + "Z"
        except (ValueError, OverflowError) as err:
            msg = "Unable to serialize datetime object."
            raise SerializationError(msg) from err
        except AttributeError as err:
            msg = "ISO-8601 object must be valid Datetime object."
            raise TypeError(msg) from err
+
+ @staticmethod
+ def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize Datetime object into IntTime format.
+ This is represented as seconds.
+
+ :param Datetime attr: Object to be serialized.
+ :rtype: int
+ :raises SerializationError: if format invalid
+ :return: serialied unix
+ """
+ if isinstance(attr, int):
+ return attr
+ try:
+ if not attr.tzinfo:
+ _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+ return int(calendar.timegm(attr.utctimetuple()))
+ except AttributeError as exc:
+ raise TypeError("Unix time object must be valid Datetime object.") from exc
+
+
def rest_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
    """Extract an attribute from *data* by following the (possibly flattened)
    JSON path in ``attr_desc["key"]``.

    :param str attr: The attribute to extract (unused; the key drives lookup).
    :param dict attr_desc: Attribute description holding the JSON "key" path.
    :param dict data: The data to extract from.
    :rtype: object
    :returns: The extracted attribute, or None if any path segment is None.
    """
    key = attr_desc["key"]
    working_data = data

    # Walk the dotted path one segment at a time (segments may contain
    # escaped dots, hence the _FLATTEN regex split).
    while "." in key:
        # Need the cast, as for some reasons "split" is typed as list[str | Any]
        dict_keys = cast(list[str], _FLATTEN.split(key))
        if len(dict_keys) == 1:
            key = _decode_attribute_map_key(dict_keys[0])
            break
        working_key = _decode_attribute_map_key(dict_keys[0])
        # NOTE(review): on a missing segment this falls back to the *root*
        # `data`, not None — presumably intentional for partially-flattened
        # payloads; confirm against upstream before changing.
        working_data = working_data.get(working_key, data)
        if working_data is None:
            # If at any point while following flatten JSON path see None, it means
            # that all properties under are None as well
            return None
        key = ".".join(dict_keys[1:])

    return working_data.get(key)
+
+
def rest_key_case_insensitive_extractor(  # pylint: disable=unused-argument, inconsistent-return-statements
    attr, attr_desc, data
):
    """Case-insensitive variant of ``rest_key_extractor``: follow the flattened
    JSON path in ``attr_desc["key"]``, matching each segment ignoring case.

    :param str attr: The attribute to extract (unused; the key drives lookup).
    :param dict attr_desc: Attribute description holding the JSON "key" path.
    :param dict data: The data to extract from.
    :rtype: object
    :returns: The extracted attribute, or None if the path dead-ends.
    """
    key = attr_desc["key"]
    working_data = data

    while "." in key:
        dict_keys = _FLATTEN.split(key)
        if len(dict_keys) == 1:
            key = _decode_attribute_map_key(dict_keys[0])
            break
        working_key = _decode_attribute_map_key(dict_keys[0])
        working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data)
        if working_data is None:
            # If at any point while following flatten JSON path see None, it means
            # that all properties under are None as well
            return None
        key = ".".join(dict_keys[1:])

    # Falls through (returning None implicitly) when working_data is falsy.
    if working_data:
        return attribute_key_case_insensitive_extractor(key, None, working_data)
+
+
def last_rest_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
    """Extract the attribute in "data" based on the last part of the JSON path key.

    :param str attr: The attribute to extract
    :param dict attr_desc: The attribute description
    :param dict data: The data to extract from
    :rtype: object
    :returns: The extracted attribute
    """
    last_key = _FLATTEN.split(attr_desc["key"])[-1]
    return attribute_key_extractor(last_key, None, data)
+
+
def last_rest_key_case_insensitive_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
    """Extract the attribute in "data" based on the last part of the JSON path key.

    This is the case insensitive version of "last_rest_key_extractor"
    :param str attr: The attribute to extract
    :param dict attr_desc: The attribute description
    :param dict data: The data to extract from
    :rtype: object
    :returns: The extracted attribute
    """
    last_key = _FLATTEN.split(attr_desc["key"])[-1]
    return attribute_key_case_insensitive_extractor(last_key, None, data)
+
+
def attribute_key_extractor(attr, _, data):
    """Return ``data[attr]`` when present, else None (plain dict lookup)."""
    return data[attr] if attr in data else None
+
+
def attribute_key_case_insensitive_extractor(attr, _, data):
    """Look up *attr* in *data* ignoring case; None when no key matches."""
    wanted = attr.lower()
    match = next((key for key in data if key.lower() == wanted), None)
    return data.get(match)
+
+
+def _extract_name_from_internal_type(internal_type):
+ """Given an internal type XML description, extract correct XML name with namespace.
+
+ :param dict internal_type: An model type
+ :rtype: tuple
+ :returns: A tuple XML name + namespace dict
+ """
+ internal_type_xml_map = getattr(internal_type, "_xml_map", {})
+ xml_name = internal_type_xml_map.get("name", internal_type.__name__)
+ xml_ns = internal_type_xml_map.get("ns", None)
+ if xml_ns:
+ xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+ return xml_name
+
+
def xml_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument,too-many-return-statements
    """Extract an attribute from an XML ElementTree node per the attribute's
    XML metadata (name, namespace, attr/text flags, wrapping, iterables).

    :param str attr: The attribute to extract (unused; xml metadata drives it).
    :param dict attr_desc: Attribute description, including optional "xml" map.
    :param data: Parsed response; only ``ET.Element`` input is handled here.
    :rtype: object
    :returns: Extracted node/text/list, or None when not applicable/found.
    """
    # JSON payloads are handled by the rest_key extractors, not here.
    if isinstance(data, dict):
        return None

    # Test if this model is XML ready first
    if not isinstance(data, ET.Element):
        return None

    xml_desc = attr_desc.get("xml", {})
    xml_name = xml_desc.get("name", attr_desc["key"])

    # Look for a children
    is_iter_type = attr_desc["type"].startswith("[")
    is_wrapped = xml_desc.get("wrapped", False)
    internal_type = attr_desc.get("internalType", None)
    internal_type_xml_map = getattr(internal_type, "_xml_map", {})

    # Integrate namespace if necessary
    xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None))
    if xml_ns:
        # Clark notation: {namespace}localname
        xml_name = "{{{}}}{}".format(xml_ns, xml_name)

    # If it's an attribute, that's simple
    if xml_desc.get("attr", False):
        return data.get(xml_name)

    # If it's x-ms-text, that's simple too
    if xml_desc.get("text", False):
        return data.text

    # Scenario where I take the local name:
    # - Wrapped node
    # - Internal type is an enum (considered basic types)
    # - Internal type has no XML/Name node
    if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)):
        children = data.findall(xml_name)
    # If internal type has a local name and it's not a list, I use that name
    elif not is_iter_type and internal_type and "name" in internal_type_xml_map:
        xml_name = _extract_name_from_internal_type(internal_type)
        children = data.findall(xml_name)
    # That's an array
    else:
        if internal_type:  # Complex type, ignore itemsName and use the complex type name
            items_name = _extract_name_from_internal_type(internal_type)
        else:
            items_name = xml_desc.get("itemsName", xml_name)
        children = data.findall(items_name)

    if len(children) == 0:
        if is_iter_type:
            if is_wrapped:
                return None  # is_wrapped no node, we want None
            return []  # not wrapped, assume empty list
        return None  # Assume it's not there, maybe an optional node.

    # If is_iter_type and not wrapped, return all found children
    if is_iter_type:
        if not is_wrapped:
            return children
        # Iter and wrapped, should have found one node only (the wrap one)
        if len(children) != 1:
            raise DeserializationError(
                "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format(
                    xml_name
                )
            )
        return list(children[0])  # Might be empty list and that's ok.

    # Here it's not a itertype, we should have found one element only or empty
    if len(children) > 1:
        raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name))
    return children[0]
+
+
+class Deserializer:
+ """Response object model deserializer.
+
+ :param dict classes: Class type dictionary for deserializing complex types.
+ :ivar list key_extractors: Ordered list of extractors to be used by this deserializer.
+ """
+
+ basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
+
+ valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?")
+
    def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None:
        """Build the deserializer.

        :param classes: Class type dictionary for deserializing complex types.
        """
        # Dispatch table: swagger format string -> deserialization callable.
        self.deserialize_type = {
            "iso-8601": Deserializer.deserialize_iso,
            "rfc-1123": Deserializer.deserialize_rfc,
            "unix-time": Deserializer.deserialize_unix,
            "duration": Deserializer.deserialize_duration,
            "date": Deserializer.deserialize_date,
            "time": Deserializer.deserialize_time,
            "decimal": Deserializer.deserialize_decimal,
            "long": Deserializer.deserialize_long,
            "bytearray": Deserializer.deserialize_bytearray,
            "base64": Deserializer.deserialize_base64,
            "object": self.deserialize_object,
            "[]": self.deserialize_iter,
            "{}": self.deserialize_dict,
        }
        # Types that, if already received, are passed through unchanged.
        self.deserialize_expected_types = {
            "duration": (isodate.Duration, datetime.timedelta),
            "iso-8601": (datetime.datetime),
        }
        # Known model classes, keyed by class name.
        self.dependencies: dict[str, type] = dict(classes) if classes else {}
        self.key_extractors = [rest_key_extractor, xml_key_extractor]
        # Additional properties only works if the "rest_key_extractor" is used to
        # extract the keys. Making it work whatever the key extractor is would be
        # too complicated, with no real scenario for now.
        # So this flag disables additional-properties detection. Set it to False
        # if you expect the deserialization input to NOT come from a JSON REST
        # syntax; otherwise, results are unexpected.
        self.additional_properties_detection = True
+
    def __call__(self, target_obj, response_data, content_type=None):
        """Call the deserializer to process a REST response.

        :param str target_obj: Target data type to deserialize to.
        :param requests.Response response_data: REST response object.
        :param str content_type: Swagger "produces" if available.
        :raises DeserializationError: if deserialization fails.
        :return: Deserialized object.
        :rtype: object
        """
        # Unpack the transport response to JSON/ElementTree first, then
        # deserialize the resulting structure into the target model.
        data = self._unpack_content(response_data, content_type)
        return self._deserialize(target_obj, data)
+
    def _deserialize(self, target_obj, data):  # pylint: disable=inconsistent-return-statements
        """Call the deserializer on a model.

        Data needs to be already deserialized as JSON or XML ElementTree

        :param str target_obj: Target data type to deserialize to.
        :param object data: Object to deserialize.
        :raises DeserializationError: if deserialization fails.
        :return: Deserialized object.
        :rtype: object
        """
        # This is already a model, go recursive just in case
        if hasattr(data, "_attribute_map"):
            # Constant attributes are fixed by the model; skip them.
            constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")]
            try:
                for attr, mapconfig in data._attribute_map.items():  # pylint: disable=protected-access
                    if attr in constants:
                        continue
                    value = getattr(data, attr)
                    if value is None:
                        continue
                    local_type = mapconfig["type"]
                    internal_data_type = local_type.strip("[]{}")
                    # NOTE(review): internal_data_type is a str here, so
                    # isinstance(internal_data_type, Enum) is always False —
                    # looks like a dead check; confirm against upstream.
                    if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum):
                        continue
                    setattr(data, attr, self._deserialize(local_type, value))
                return data
            except AttributeError:
                return

        response, class_name = self._classify_target(target_obj, data)

        # Basic type name: deserialize the raw data directly.
        if isinstance(response, str):
            return self.deserialize_data(data, response)
        if isinstance(response, type) and issubclass(response, Enum):
            return self.deserialize_enum(data, response)

        if data is None or data is CoreNull:
            return data
        try:
            attributes = response._attribute_map  # type: ignore # pylint: disable=protected-access
            d_attrs = {}
            for attr, attr_desc in attributes.items():
                # Check empty string. If it's not empty, someone has a real "additionalProperties"...
                if attr == "additional_properties" and attr_desc["key"] == "":
                    continue
                raw_value = None
                # Enhance attr_desc with some dynamic data
                attr_desc = attr_desc.copy()  # Do a copy, do not change the real one
                internal_data_type = attr_desc["type"].strip("[]{}")
                if internal_data_type in self.dependencies:
                    attr_desc["internalType"] = self.dependencies[internal_data_type]

                # Try every extractor in order; first non-None extraction wins.
                for key_extractor in self.key_extractors:
                    found_value = key_extractor(attr, attr_desc, data)
                    if found_value is not None:
                        if raw_value is not None and raw_value != found_value:
                            msg = (
                                "Ignoring extracted value '%s' from %s for key '%s'"
                                " (duplicate extraction, follow extractors order)"
                            )
                            _LOGGER.warning(msg, found_value, key_extractor, attr)
                            continue
                        raw_value = found_value

                value = self.deserialize_data(raw_value, attr_desc["type"])
                d_attrs[attr] = value
        except (AttributeError, TypeError, KeyError) as err:
            msg = "Unable to deserialize to object: " + class_name  # type: ignore
            raise DeserializationError(msg) from err
        additional_properties = self._build_additional_properties(attributes, data)
        return self._instantiate_model(response, d_attrs, additional_properties)
+
+ def _build_additional_properties(self, attribute_map, data):
+ if not self.additional_properties_detection:
+ return None
+ if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "":
+ # Check empty string. If it's not empty, someone has a real "additionalProperties"
+ return None
+ if isinstance(data, ET.Element):
+ data = {el.tag: el.text for el in data}
+
+ known_keys = {
+ _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0])
+ for desc in attribute_map.values()
+ if desc["key"] != ""
+ }
+ present_keys = set(data.keys())
+ missing_keys = present_keys - known_keys
+ return {key: data[key] for key in missing_keys}
+
+ def _classify_target(self, target, data):
+ """Check to see whether the deserialization target object can
+ be classified into a subclass.
+ Once classification has been determined, initialize object.
+
+ :param str target: The target object type to deserialize to.
+ :param str/dict data: The response data to deserialize.
+ :return: The classified target object and its class name.
+ :rtype: tuple
+ """
+ if target is None:
+ return None, None
+
+ if isinstance(target, str):
+ try:
+ target = self.dependencies[target]
+ except KeyError:
+ return target, target
+
+ try:
+ target = target._classify(data, self.dependencies) # type: ignore # pylint: disable=protected-access
+ except AttributeError:
+ pass # Target is not a Model, no classify
+ return target, target.__class__.__name__ # type: ignore
+
    def failsafe_deserialize(self, target_obj, data, content_type=None):
        """Ignores any errors encountered in deserialization,
        and falls back to not deserializing the object. Recommended
        for use in error deserialization, as we want to return the
        HttpResponseError to users, and not have them deal with
        a deserialization error.

        :param str target_obj: The target object type to deserialize to.
        :param str/dict data: The response data to deserialize.
        :param str content_type: Swagger "produces" if available.
        :return: Deserialized object, or None on any failure.
        :rtype: object
        """
        try:
            return self(target_obj, data, content_type=content_type)
        except:  # pylint: disable=bare-except
            # Deliberately bare: ANY failure here must not reach the caller.
            _LOGGER.debug(
                "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True
            )
            return None
+
    @staticmethod
    def _unpack_content(raw_data, content_type=None):
        """Extract the correct structure for deserialization.

        If raw_data is a PipelineResponse, try to extract the result of RawDeserializer.
        if we can't, raise. Your Pipeline should have a RawDeserializer.

        If not a pipeline response and raw_data is bytes or string, use content-type
        to decode it. If no content-type, try JSON.

        If raw_data is something else, bypass all logic and return it directly.

        :param obj raw_data: Data to be processed.
        :param str content_type: How to parse if raw_data is a string/bytes.
        :raises JSONDecodeError: If JSON is requested and parsing is impossible.
        :raises UnicodeDecodeError: If bytes is not UTF8
        :rtype: object
        :return: Unpacked content.
        """
        # Assume this is enough to detect a Pipeline Response without importing it
        context = getattr(raw_data, "context", {})
        if context:
            if RawDeserializer.CONTEXT_NAME in context:
                return context[RawDeserializer.CONTEXT_NAME]
            raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize")

        # Assume this is enough to recognize universal_http.ClientResponse without importing it
        # (its .text is a method, unlike requests.Response below).
        if hasattr(raw_data, "body"):
            return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers)

        # Assume this enough to recognize requests.Response without importing it.
        if hasattr(raw_data, "_content_consumed"):
            return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers)

        # Raw text/bytes/stream: decode per content_type (JSON by default).
        if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"):
            return RawDeserializer.deserialize_from_text(raw_data, content_type)  # type: ignore
        return raw_data
+
    def _instantiate_model(self, response, attrs, additional_properties=None):
        """Instantiate a response model passing in deserialized args.

        :param Response response: The response model class.
        :param dict attrs: The deserialized response attributes.
        :param dict additional_properties: Additional properties to be set.
        :rtype: Response
        :return: The instantiated response model.
        """
        if callable(response):
            subtype = getattr(response, "_subtype_map", {})
            try:
                # Readonly/constant attributes cannot go through the constructor:
                # readonly ones are assigned afterwards, constants are dropped.
                readonly = [
                    k
                    for k, v in response._validation.items()  # pylint: disable=protected-access # type: ignore
                    if v.get("readonly")
                ]
                const = [
                    k
                    for k, v in response._validation.items()  # pylint: disable=protected-access # type: ignore
                    if v.get("constant")
                ]
                kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const}
                response_obj = response(**kwargs)
                for attr in readonly:
                    setattr(response_obj, attr, attrs.get(attr))
                if additional_properties:
                    response_obj.additional_properties = additional_properties  # type: ignore
                return response_obj
            except TypeError as err:
                msg = "Unable to deserialize {} into model {}. ".format(kwargs, response)  # type: ignore
                raise DeserializationError(msg + str(err)) from err
        else:
            # "response" is already an instance: populate it in place.
            try:
                for attr, value in attrs.items():
                    setattr(response, attr, value)
                return response
            except Exception as exp:
                msg = "Unable to populate response model. "
                msg += "Type: {}, Error: {}".format(type(response), exp)
                raise DeserializationError(msg) from exp
+
    def deserialize_data(self, data, data_type):  # pylint: disable=too-many-return-statements
        """Process data for deserialization according to data type.

        :param str data: The response string to be deserialized.
        :param str data_type: The type to deserialize to.
        :raises DeserializationError: if deserialization fails.
        :return: Deserialized object.
        :rtype: object
        """
        if data is None:
            return data

        try:
            if not data_type:
                return data
            # Basic types ("str", "int", ...) go through deserialize_basic.
            if data_type in self.basic_types.values():
                return self.deserialize_basic(data, data_type)
            if data_type in self.deserialize_type:
                # Already the expected Python type (e.g. datetime): pass through.
                if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())):
                    return data

                is_a_text_parsing_type = lambda x: x not in [  # pylint: disable=unnecessary-lambda-assignment
                    "object",
                    "[]",
                    r"{}",
                ]
                # An empty XML node deserializes to None for text-based types.
                if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text:
                    return None
                data_val = self.deserialize_type[data_type](data)
                return data_val

            # "[elementType]" or "{elementType}": dispatch on the brackets.
            iter_type = data_type[0] + data_type[-1]
            if iter_type in self.deserialize_type:
                return self.deserialize_type[iter_type](data, data_type[1:-1])

            obj_type = self.dependencies[data_type]
            if issubclass(obj_type, Enum):
                if isinstance(data, ET.Element):
                    data = data.text
                return self.deserialize_enum(data, obj_type)

        except (ValueError, TypeError, AttributeError) as err:
            msg = "Unable to deserialize response data."
            msg += " Data: {}, {}".format(data, data_type)
            raise DeserializationError(msg) from err
        # Complex model type: recurse (outside the try so model errors surface).
        return self._deserialize(obj_type, data)
+
+ def deserialize_iter(self, attr, iter_type):
+ """Deserialize an iterable.
+
+ :param list attr: Iterable to be deserialized.
+ :param str iter_type: The type of object in the iterable.
+ :return: Deserialized iterable.
+ :rtype: list
+ """
+ if attr is None:
+ return None
+ if isinstance(attr, ET.Element): # If I receive an element here, get the children
+ attr = list(attr)
+ if not isinstance(attr, (list, set)):
+ raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr)))
+ return [self.deserialize_data(a, iter_type) for a in attr]
+
+ def deserialize_dict(self, attr, dict_type):
+ """Deserialize a dictionary.
+
+ :param dict/list attr: Dictionary to be deserialized. Also accepts
+ a list of key, value pairs.
+ :param str dict_type: The object type of the items in the dictionary.
+ :return: Deserialized dictionary.
+ :rtype: dict
+ """
+ if isinstance(attr, list):
+ return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr}
+
+ if isinstance(attr, ET.Element):
+ # Transform value into {"Key": "value"}
+ attr = {el.tag: el.text for el in attr}
+ return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()}
+
+ def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements
+ """Deserialize a generic object.
+ This will be handled as a dictionary.
+
+ :param dict attr: Dictionary to be deserialized.
+ :return: Deserialized object.
+ :rtype: dict
+ :raises TypeError: if non-builtin datatype encountered.
+ """
+ if attr is None:
+ return None
+ if isinstance(attr, ET.Element):
+ # Do no recurse on XML, just return the tree as-is
+ return attr
+ if isinstance(attr, str):
+ return self.deserialize_basic(attr, "str")
+ obj_type = type(attr)
+ if obj_type in self.basic_types:
+ return self.deserialize_basic(attr, self.basic_types[obj_type])
+ if obj_type is _long_type:
+ return self.deserialize_long(attr)
+
+ if obj_type == dict:
+ deserialized = {}
+ for key, value in attr.items():
+ try:
+ deserialized[key] = self.deserialize_object(value, **kwargs)
+ except ValueError:
+ deserialized[key] = None
+ return deserialized
+
+ if obj_type == list:
+ deserialized = []
+ for obj in attr:
+ try:
+ deserialized.append(self.deserialize_object(obj, **kwargs))
+ except ValueError:
+ pass
+ return deserialized
+
+ error = "Cannot deserialize generic object with type: "
+ raise TypeError(error + str(obj_type))
+
+ def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return-statements
+ """Deserialize basic builtin data type from string.
+ Will attempt to convert to str, int, float and bool.
+ This function will also accept '1', '0', 'true' and 'false' as
+ valid bool values.
+
+ :param str attr: response string to be deserialized.
+ :param str data_type: deserialization data type.
+ :return: Deserialized basic type.
+ :rtype: str, int, float or bool
+ :raises TypeError: if string format is not valid.
+ """
+ # If we're here, data is supposed to be a basic type.
+ # If it's still an XML node, take the text
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ if not attr:
+ if data_type == "str":
+ # None or '', node is empty string.
+ return ""
+ # None or '', node with a strong type is None.
+ # Don't try to model "empty bool" or "empty int"
+ return None
+
+ if data_type == "bool":
+ if attr in [True, False, 1, 0]:
+ return bool(attr)
+ if isinstance(attr, str):
+ if attr.lower() in ["true", "1"]:
+ return True
+ if attr.lower() in ["false", "0"]:
+ return False
+ raise TypeError("Invalid boolean value: {}".format(attr))
+
+ if data_type == "str":
+ return self.deserialize_unicode(attr)
+ return eval(data_type)(attr) # nosec # pylint: disable=eval-used
+
    @staticmethod
    def deserialize_unicode(data):
        """Preserve unicode objects in Python 2, otherwise return data
        as a string.

        :param str data: response string to be deserialized.
        :return: Deserialized string.
        :rtype: str or unicode
        """
        # We might be here because we have an enum modeled as string,
        # and we try to deserialize a partial dict with enum inside
        if isinstance(data, Enum):
            return data

        # Consider this is real string
        # NOTE: the `unicode` check is a Python 2 relic; on Python 3 the
        # NameError branch always fires and this reduces to str(data).
        try:
            if isinstance(data, unicode):  # type: ignore
                return data
        except NameError:
            return str(data)
        return str(data)
+
+ @staticmethod
+ def deserialize_enum(data, enum_obj):
+ """Deserialize string into enum object.
+
+ If the string is not a valid enum value it will be returned as-is
+ and a warning will be logged.
+
+ :param str data: Response string to be deserialized. If this value is
+ None or invalid it will be returned as-is.
+ :param Enum enum_obj: Enum object to deserialize to.
+ :return: Deserialized enum object.
+ :rtype: Enum
+ """
+ if isinstance(data, enum_obj) or data is None:
+ return data
+ if isinstance(data, Enum):
+ data = data.value
+ if isinstance(data, int):
+ # Workaround. We might consider remove it in the future.
+ try:
+ return list(enum_obj.__members__.values())[data]
+ except IndexError as exc:
+ error = "{!r} is not a valid index for enum {!r}"
+ raise DeserializationError(error.format(data, enum_obj)) from exc
+ try:
+ return enum_obj(str(data))
+ except ValueError:
+ for enum_value in enum_obj:
+ if enum_value.value.lower() == str(data).lower():
+ return enum_value
+ # We don't fail anymore for unknown value, we deserialize as a string
+ _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj)
+ return Deserializer.deserialize_unicode(data)
+
+ @staticmethod
+ def deserialize_bytearray(attr):
+ """Deserialize string into bytearray.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized bytearray
+ :rtype: bytearray
+ :raises TypeError: if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ return bytearray(b64decode(attr)) # type: ignore
+
+ @staticmethod
+ def deserialize_base64(attr):
+ """Deserialize base64 encoded string into string.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized base64 string
+ :rtype: bytearray
+ :raises TypeError: if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore
+ attr = attr + padding # type: ignore
+ encoded = attr.replace("-", "+").replace("_", "/")
+ return b64decode(encoded)
+
+ @staticmethod
+ def deserialize_decimal(attr):
+ """Deserialize string into Decimal object.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized decimal
+ :raises DeserializationError: if string format invalid.
+ :rtype: decimal
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ try:
+ return decimal.Decimal(str(attr)) # type: ignore
+ except decimal.DecimalException as err:
+ msg = "Invalid decimal {}".format(attr)
+ raise DeserializationError(msg) from err
+
    @staticmethod
    def deserialize_long(attr):
        """Deserialize string into long (Py2) or int (Py3).

        :param str attr: response string to be deserialized.
        :return: Deserialized int
        :rtype: long or int
        :raises ValueError: if string format invalid.
        """
        # An XML node contributes its text.
        if isinstance(attr, ET.Element):
            attr = attr.text
        return _long_type(attr)  # type: ignore
+
    @staticmethod
    def deserialize_duration(attr):
        """Deserialize ISO-8601 formatted string into TimeDelta object.

        :param str attr: response string to be deserialized.
        :return: Deserialized duration
        :rtype: TimeDelta
        :raises DeserializationError: if string format invalid.
        """
        if isinstance(attr, ET.Element):
            attr = attr.text
        try:
            duration = isodate.parse_duration(attr)
        except (ValueError, OverflowError, AttributeError) as err:
            msg = "Cannot deserialize duration object."
            raise DeserializationError(msg) from err
        return duration
+
    @staticmethod
    def deserialize_date(attr):
        """Deserialize ISO-8601 formatted string into Date object.

        :param str attr: response string to be deserialized.
        :return: Deserialized date
        :rtype: Date
        :raises DeserializationError: if string format invalid.
        """
        if isinstance(attr, ET.Element):
            attr = attr.text
        # Reject anything containing letters before handing off to isodate.
        if re.search(r"[^\W\d_]", attr, re.I + re.U):  # type: ignore
            raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
        # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception.
        return isodate.parse_date(attr, defaultmonth=0, defaultday=0)
+
+ @staticmethod
+ def deserialize_time(attr):
+ """Deserialize ISO-8601 formatted string into time object.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized time
+ :rtype: datetime.time
+ :raises DeserializationError: if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore
+ raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
+ return isodate.parse_time(attr)
+
+ @staticmethod
+ def deserialize_rfc(attr):
+ """Deserialize RFC-1123 formatted string into Datetime object.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized RFC datetime
+ :rtype: Datetime
+ :raises DeserializationError: if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ try:
+ parsed_date = email.utils.parsedate_tz(attr) # type: ignore
+ date_obj = datetime.datetime(
+ *parsed_date[:6], tzinfo=datetime.timezone(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60))
+ )
+ if not date_obj.tzinfo:
+ date_obj = date_obj.astimezone(tz=TZ_UTC)
+ except ValueError as err:
+ msg = "Cannot deserialize to rfc datetime object."
+ raise DeserializationError(msg) from err
+ return date_obj
+
    @staticmethod
    def deserialize_iso(attr):
        """Deserialize ISO-8601 formatted string into Datetime object.

        :param str attr: response string to be deserialized.
        :return: Deserialized ISO datetime
        :rtype: Datetime
        :raises DeserializationError: if string format invalid.
        """
        if isinstance(attr, ET.Element):
            attr = attr.text
        try:
            attr = attr.upper()  # type: ignore
            match = Deserializer.valid_date.match(attr)
            if not match:
                raise ValueError("Invalid datetime string: " + attr)

            # Truncate the fractional-seconds part to at most 6 digits, since
            # datetime only carries microsecond precision.
            check_decimal = attr.split(".")
            if len(check_decimal) > 1:
                decimal_str = ""
                for digit in check_decimal[1]:
                    if digit.isdigit():
                        decimal_str += digit
                    else:
                        break
                if len(decimal_str) > 6:
                    attr = attr.replace(decimal_str, decimal_str[0:6])

            date_obj = isodate.parse_datetime(attr)
            test_utc = date_obj.utctimetuple()
            if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
                raise OverflowError("Hit max or min date")
        except (ValueError, OverflowError, AttributeError) as err:
            msg = "Cannot deserialize datetime object."
            raise DeserializationError(msg) from err
        return date_obj
+
+ @staticmethod
+ def deserialize_unix(attr):
+ """Serialize Datetime object into IntTime format.
+ This is represented as seconds.
+
+ :param int attr: Object to be serialized.
+ :return: Deserialized datetime
+ :rtype: Datetime
+ :raises DeserializationError: if format invalid
+ """
+ if isinstance(attr, ET.Element):
+ attr = int(attr.text) # type: ignore
+ try:
+ attr = int(attr)
+ date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC)
+ except ValueError as err:
+ msg = "Cannot deserialize to unix datetime object."
+ raise DeserializationError(msg) from err
+ return date_obj
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_version.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_version.py
new file mode 100644
index 00000000000..0c7b9c7ab52
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/_version.py
@@ -0,0 +1,8 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+# Version of the vendored azure-mgmt-containerservice SDK these generated
+# modules come from; surfaced in the client's sdk_moniker / User-Agent string.
+VERSION = "40.2.0"
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/__init__.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/__init__.py
new file mode 100644
index 00000000000..a55668168f0
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/__init__.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from ._patch import * # pylint: disable=unused-wildcard-import
+
+from ._container_service_client import ContainerServiceClient # type: ignore
+
+# Re-export hand-written customizations from _patch alongside the generated
+# client; the guard handles a _patch module that defines no __all__.
+try:
+ from ._patch import __all__ as _patch_all
+ from ._patch import *
+except ImportError:
+ _patch_all = []
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+ "ContainerServiceClient",
+]
+__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore
+
+# Final hook letting _patch mutate the package after all exports are set up.
+_patch_sdk()
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/_configuration.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/_configuration.py
new file mode 100644
index 00000000000..f67b2db6f29
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/_configuration.py
@@ -0,0 +1,75 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, Optional, TYPE_CHECKING
+
+from azure.core.pipeline import policies
+from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
+
+from .._version import VERSION
+
+if TYPE_CHECKING:
+ from azure.core import AzureClouds
+ from azure.core.credentials_async import AsyncTokenCredential
+
+
+class ContainerServiceClientConfiguration: # pylint: disable=too-many-instance-attributes
+ """Configuration for ContainerServiceClient.
+
+ Note that all parameters used to create this instance are saved as instance
+ attributes.
+
+ :param credential: Credential needed for the client to connect to Azure. Required.
+ :type credential: ~azure.core.credentials_async.AsyncTokenCredential
+ :param subscription_id: The ID of the target subscription. The value must be an UUID. Required.
+ :type subscription_id: str
+ :param cloud_setting: The cloud setting for which to get the ARM endpoint. Default value is
+ None.
+ :type cloud_setting: ~azure.core.AzureClouds
+ :keyword api_version: Api Version. Default value is "2025-10-01". Note that overriding this
+ default value may result in unsupported behavior.
+ :paramtype api_version: str
+ """
+
+ def __init__(
+ self,
+ credential: "AsyncTokenCredential",
+ subscription_id: str,
+ cloud_setting: Optional["AzureClouds"] = None,
+ **kwargs: Any
+ ) -> None:
+ # Popped (not just read) so the raw kwarg is not forwarded to the policies below.
+ api_version: str = kwargs.pop("api_version", "2025-10-01")
+
+ if credential is None:
+ raise ValueError("Parameter 'credential' must not be None.")
+ if subscription_id is None:
+ raise ValueError("Parameter 'subscription_id' must not be None.")
+
+ self.credential = credential
+ self.subscription_id = subscription_id
+ self.cloud_setting = cloud_setting
+ self.api_version = api_version
+ # Default scope targets public Azure ARM; the client passes per-cloud scopes
+ # resolved via get_arm_endpoints when it builds this configuration.
+ self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
+ kwargs.setdefault("sdk_moniker", "mgmt-containerservice/{}".format(VERSION))
+ self.polling_interval = kwargs.get("polling_interval", 30)
+ self._configure(**kwargs)
+
+ def _configure(self, **kwargs: Any) -> None:
+ # Each pipeline policy may be injected through kwargs; otherwise a default
+ # azure-core / azure-mgmt-core policy instance is constructed.
+ self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
+ self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
+ self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
+ self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
+ self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
+ self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
+ self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
+ self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
+ # Auth defaults to ARM challenge-based bearer auth over the configured scopes.
+ self.authentication_policy = kwargs.get("authentication_policy")
+ if self.credential and not self.authentication_policy:
+ self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
+ self.credential, *self.credential_scopes, **kwargs
+ )
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/_container_service_client.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/_container_service_client.py
new file mode 100644
index 00000000000..40bf44c3044
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/_container_service_client.py
@@ -0,0 +1,202 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from copy import deepcopy
+from typing import Any, Awaitable, Optional, TYPE_CHECKING, cast
+from typing_extensions import Self
+
+from azure.core.pipeline import policies
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.settings import settings
+from azure.mgmt.core import AsyncARMPipelineClient
+from azure.mgmt.core.policies import AsyncARMAutoResourceProviderRegistrationPolicy
+from azure.mgmt.core.tools import get_arm_endpoints
+
+from .. import models as _models
+from .._utils.serialization import Deserializer, Serializer
+from ._configuration import ContainerServiceClientConfiguration
+from .operations import (
+ AgentPoolsOperations,
+ MachinesOperations,
+ MaintenanceConfigurationsOperations,
+ ManagedClustersOperations,
+ ManagedNamespacesOperations,
+ Operations,
+ PrivateEndpointConnectionsOperations,
+ PrivateLinkResourcesOperations,
+ ResolvePrivateLinkServiceIdOperations,
+ SnapshotsOperations,
+ TrustedAccessRoleBindingsOperations,
+ TrustedAccessRolesOperations,
+)
+
+if TYPE_CHECKING:
+ from azure.core import AzureClouds
+ from azure.core.credentials_async import AsyncTokenCredential
+
+
+class ContainerServiceClient: # pylint: disable=too-many-instance-attributes
+ """The Container Service Client.
+
+ :ivar operations: Operations operations
+ :vartype operations: azure.mgmt.containerservice.aio.operations.Operations
+ :ivar managed_clusters: ManagedClustersOperations operations
+ :vartype managed_clusters: azure.mgmt.containerservice.aio.operations.ManagedClustersOperations
+ :ivar maintenance_configurations: MaintenanceConfigurationsOperations operations
+ :vartype maintenance_configurations:
+ azure.mgmt.containerservice.aio.operations.MaintenanceConfigurationsOperations
+ :ivar managed_namespaces: ManagedNamespacesOperations operations
+ :vartype managed_namespaces:
+ azure.mgmt.containerservice.aio.operations.ManagedNamespacesOperations
+ :ivar agent_pools: AgentPoolsOperations operations
+ :vartype agent_pools: azure.mgmt.containerservice.aio.operations.AgentPoolsOperations
+ :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
+ :vartype private_endpoint_connections:
+ azure.mgmt.containerservice.aio.operations.PrivateEndpointConnectionsOperations
+ :ivar private_link_resources: PrivateLinkResourcesOperations operations
+ :vartype private_link_resources:
+ azure.mgmt.containerservice.aio.operations.PrivateLinkResourcesOperations
+ :ivar resolve_private_link_service_id: ResolvePrivateLinkServiceIdOperations operations
+ :vartype resolve_private_link_service_id:
+ azure.mgmt.containerservice.aio.operations.ResolvePrivateLinkServiceIdOperations
+ :ivar snapshots: SnapshotsOperations operations
+ :vartype snapshots: azure.mgmt.containerservice.aio.operations.SnapshotsOperations
+ :ivar trusted_access_role_bindings: TrustedAccessRoleBindingsOperations operations
+ :vartype trusted_access_role_bindings:
+ azure.mgmt.containerservice.aio.operations.TrustedAccessRoleBindingsOperations
+ :ivar trusted_access_roles: TrustedAccessRolesOperations operations
+ :vartype trusted_access_roles:
+ azure.mgmt.containerservice.aio.operations.TrustedAccessRolesOperations
+ :ivar machines: MachinesOperations operations
+ :vartype machines: azure.mgmt.containerservice.aio.operations.MachinesOperations
+ :param credential: Credential needed for the client to connect to Azure. Required.
+ :type credential: ~azure.core.credentials_async.AsyncTokenCredential
+ :param subscription_id: The ID of the target subscription. The value must be an UUID. Required.
+ :type subscription_id: str
+ :param base_url: Service URL. Default value is None.
+ :type base_url: str
+ :keyword cloud_setting: The cloud setting for which to get the ARM endpoint. Default value is
+ None.
+ :paramtype cloud_setting: ~azure.core.AzureClouds
+ :keyword api_version: Api Version. Default value is "2025-10-01". Note that overriding this
+ default value may result in unsupported behavior.
+ :paramtype api_version: str
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ """
+
+ def __init__(
+ self,
+ credential: "AsyncTokenCredential",
+ subscription_id: str,
+ base_url: Optional[str] = None,
+ *,
+ cloud_setting: Optional["AzureClouds"] = None,
+ **kwargs: Any
+ ) -> None:
+ # Resolve the ARM endpoint and credential scopes for the selected cloud
+ # (falls back to the process-wide azure_cloud setting when none is given).
+ _cloud = cloud_setting or settings.current.azure_cloud # type: ignore
+ _endpoints = get_arm_endpoints(_cloud)
+ if not base_url:
+ base_url = _endpoints["resource_manager"]
+ credential_scopes = kwargs.pop("credential_scopes", _endpoints["credential_scopes"])
+ self._config = ContainerServiceClientConfiguration(
+ credential=credential,
+ subscription_id=subscription_id,
+ cloud_setting=cloud_setting,
+ credential_scopes=credential_scopes,
+ **kwargs
+ )
+
+ # Default policy chain order matters: auth/retry/redirect wrap the request
+ # before logging; callers may replace the whole chain via `policies=`.
+ _policies = kwargs.pop("policies", None)
+ if _policies is None:
+ _policies = [
+ policies.RequestIdPolicy(**kwargs),
+ self._config.headers_policy,
+ self._config.user_agent_policy,
+ self._config.proxy_policy,
+ policies.ContentDecodePolicy(**kwargs),
+ AsyncARMAutoResourceProviderRegistrationPolicy(),
+ self._config.redirect_policy,
+ self._config.retry_policy,
+ self._config.authentication_policy,
+ self._config.custom_hook_policy,
+ self._config.logging_policy,
+ policies.DistributedTracingPolicy(**kwargs),
+ policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None,
+ self._config.http_logging_policy,
+ ]
+ self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(
+ base_url=cast(str, base_url), policies=_policies, **kwargs
+ )
+
+ # Shared (de)serializers over all generated model classes; client-side
+ # validation is disabled so the service is the single source of truth.
+ client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
+ self._serialize = Serializer(client_models)
+ self._deserialize = Deserializer(client_models)
+ self._serialize.client_side_validation = False
+ self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
+ self.managed_clusters = ManagedClustersOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.maintenance_configurations = MaintenanceConfigurationsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.managed_namespaces = ManagedNamespacesOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.agent_pools = AgentPoolsOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.private_link_resources = PrivateLinkResourcesOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.resolve_private_link_service_id = ResolvePrivateLinkServiceIdOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.snapshots = SnapshotsOperations(self._client, self._config, self._serialize, self._deserialize)
+ self.trusted_access_role_bindings = TrustedAccessRoleBindingsOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.trusted_access_roles = TrustedAccessRolesOperations(
+ self._client, self._config, self._serialize, self._deserialize
+ )
+ self.machines = MachinesOperations(self._client, self._config, self._serialize, self._deserialize)
+
+ def _send_request(
+ self, request: HttpRequest, *, stream: bool = False, **kwargs: Any
+ ) -> Awaitable[AsyncHttpResponse]:
+ """Runs the network request through the client's chained policies.
+
+ >>> from azure.core.rest import HttpRequest
+ >>> request = HttpRequest("GET", "https://www.example.org/")
+
+ >>> response = await client._send_request(request)
+
+
+ For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
+
+ :param request: The network request you want to make. Required.
+ :type request: ~azure.core.rest.HttpRequest
+ :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
+ :return: The response of your network call. Does not do error handling on your response.
+ :rtype: ~azure.core.rest.AsyncHttpResponse
+ """
+
+ # Copy before formatting so the caller's request object is never mutated.
+ request_copy = deepcopy(request)
+ request_copy.url = self._client.format_url(request_copy.url)
+ return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore
+
+ async def close(self) -> None:
+ """Close the underlying pipeline client and release its transport."""
+ await self._client.close()
+
+ async def __aenter__(self) -> Self:
+ await self._client.__aenter__()
+ return self
+
+ async def __aexit__(self, *exc_details: Any) -> None:
+ await self._client.__aexit__(*exc_details)
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/_patch.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/_patch.py
new file mode 100644
index 00000000000..8bcb627aa47
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/_patch.py
@@ -0,0 +1,21 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
+
+
+# No customizations yet: patch_sdk is intentionally a no-op placeholder.
+def patch_sdk():
+ """Do not remove from this file.
+
+ `patch_sdk` is a last resort escape hatch that allows you to do customizations
+ you can't accomplish using the techniques described in
+ https://aka.ms/azsdk/python/dpcodegen/python/customize
+ """
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/__init__.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/__init__.py
new file mode 100644
index 00000000000..70b2a677e7b
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/__init__.py
@@ -0,0 +1,47 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from ._patch import * # pylint: disable=unused-wildcard-import
+
+from ._operations import Operations # type: ignore
+from ._managed_clusters_operations import ManagedClustersOperations # type: ignore
+from ._maintenance_configurations_operations import MaintenanceConfigurationsOperations # type: ignore
+from ._managed_namespaces_operations import ManagedNamespacesOperations # type: ignore
+from ._agent_pools_operations import AgentPoolsOperations # type: ignore
+from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations # type: ignore
+from ._private_link_resources_operations import PrivateLinkResourcesOperations # type: ignore
+from ._resolve_private_link_service_id_operations import ResolvePrivateLinkServiceIdOperations # type: ignore
+from ._snapshots_operations import SnapshotsOperations # type: ignore
+from ._trusted_access_role_bindings_operations import TrustedAccessRoleBindingsOperations # type: ignore
+from ._trusted_access_roles_operations import TrustedAccessRolesOperations # type: ignore
+from ._machines_operations import MachinesOperations # type: ignore
+
+# Merge any hand-written customizations from _patch into this package's exports.
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+ "Operations",
+ "ManagedClustersOperations",
+ "MaintenanceConfigurationsOperations",
+ "ManagedNamespacesOperations",
+ "AgentPoolsOperations",
+ "PrivateEndpointConnectionsOperations",
+ "PrivateLinkResourcesOperations",
+ "ResolvePrivateLinkServiceIdOperations",
+ "SnapshotsOperations",
+ "TrustedAccessRoleBindingsOperations",
+ "TrustedAccessRolesOperations",
+ "MachinesOperations",
+]
+__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore
+_patch_sdk()
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_agent_pools_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_agent_pools_operations.py
new file mode 100644
index 00000000000..32e36defb7e
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_agent_pools_operations.py
@@ -0,0 +1,1214 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from io import IOBase
+from typing import Any, AsyncIterator, Callable, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core import AsyncPipelineClient
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ..._utils.serialization import Deserializer, Serializer
+from ...operations._agent_pools_operations import (
+ build_abort_latest_operation_request,
+ build_create_or_update_request,
+ build_delete_machines_request,
+ build_delete_request,
+ build_get_available_agent_pool_versions_request,
+ build_get_request,
+ build_get_upgrade_profile_request,
+ build_list_request,
+ build_upgrade_node_image_version_request,
+)
+from .._configuration import ContainerServiceClientConfiguration
+
+T = TypeVar("T")
+# Signature of the optional `cls` response hook each operation accepts.
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]]
+# NOTE(review): module-level alias, presumably kept for generated annotations
+# that still spell typing.List — confirm before removing.
+List = list
+
+
+class AgentPoolsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s
+ :attr:`agent_pools` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ # The owning client passes (client, config, serializer, deserializer)
+ # positionally; keyword fallbacks allow explicit construction in tests.
+ input_args = list(args)
+ self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ async def _abort_latest_operation_initial(
+ self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ # Initial (non-polling) call for the abort LRO; returns the raw streamed
+ # response body so the poller can drive completion from the headers.
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_abort_latest_operation_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ agent_pool_name=agent_pool_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ # 202 carries the polling URLs consumed by the LRO poller.
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["location"] = self._deserialize("str", response.headers.get("location"))
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def begin_abort_latest_operation(
+ self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Aborts last operation running on agent pool.
+
+ Aborts the currently running operation on the agent pool. The Agent Pool will be moved to a
+ Canceling state and eventually to a Canceled state when cancellation finishes. If the operation
+ completes before cancellation can take place, a 409 error code is returned.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param agent_pool_name: The name of the agent pool. Required.
+ :type agent_pool_name: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ # A continuation token lets a caller rebuild the poller without re-issuing
+ # the initial request.
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._abort_latest_operation_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ agent_pool_name=agent_pool_name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ # polling=True -> ARM polling via the `location` header; False -> no polling;
+ # anything else is treated as a user-supplied polling method.
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ @distributed_trace
+ def list(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncItemPaged["_models.AgentPool"]:
+ """Gets a list of agent pools in the specified managed cluster.
+
+ Gets a list of agent pools in the specified managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :return: An iterator like instance of either AgentPool or the result of cls(response)
+ :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.AgentPool]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.AgentPoolListResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ # First page builds the canonical list request; later pages re-use the
+ # service-provided next_link but re-apply the client's api-version.
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ async def extract_data(pipeline_response):
+ # Pull the page's items and the continuation link out of the response.
+ deserialized = self._deserialize("AgentPoolListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ @distributed_trace_async
+ async def get(
+ self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
+ ) -> _models.AgentPool:
+ """Gets the specified managed cluster agent pool.
+
+ Gets the specified managed cluster agent pool.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param agent_pool_name: The name of the agent pool. Required.
+ :type agent_pool_name: str
+ :return: AgentPool or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.AgentPool
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ agent_pool_name=agent_pool_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ # Non-streaming GET: the full body is buffered before deserialization.
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("AgentPool", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+    async def _create_or_update_initial(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        agent_pool_name: str,
+        parameters: Union[_models.AgentPool, IO[bytes]],
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
+        **kwargs: Any
+    ) -> AsyncIterator[bytes]:
+        """Send the initial PUT of the create/update long-running operation.
+
+        Returns the streamed response body; ``begin_create_or_update`` consumes it and
+        hands the raw pipeline response to the LRO poller.
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+        content_type = content_type or "application/json"
+        _json = None
+        _content = None
+        # Raw byte/stream bodies are forwarded as-is; model instances are JSON-serialized.
+        if isinstance(parameters, (IOBase, bytes)):
+            _content = parameters
+        else:
+            _json = self._serialize.body(parameters, "AgentPool")
+
+        _request = build_create_or_update_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            agent_pool_name=agent_pool_name,
+            subscription_id=self._config.subscription_id,
+            if_match=if_match,
+            if_none_match=if_none_match,
+            api_version=api_version,
+            content_type=content_type,
+            json=_json,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        # Stream the response so the poller controls when/whether the body is read.
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        # 200 = updated, 201 = created; anything else is an error.
+        if response.status_code not in [200, 201]:
+            try:
+                await response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    # Typing-only overload: typed AgentPool model body (JSON-serialized by the implementation).
+    @overload
+    async def begin_create_or_update(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        agent_pool_name: str,
+        parameters: _models.AgentPool,
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> AsyncLROPoller[_models.AgentPool]:
+        """Creates or updates an agent pool in the specified managed cluster.
+
+        Creates or updates an agent pool in the specified managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param agent_pool_name: The name of the agent pool. Required.
+        :type agent_pool_name: str
+        :param parameters: The agent pool to create or update. Required.
+        :type parameters: ~azure.mgmt.containerservice.models.AgentPool
+        :param if_match: The request should only proceed if an entity matches this string. Default
+         value is None.
+        :type if_match: str
+        :param if_none_match: The request should only proceed if no entity matches this string. Default
+         value is None.
+        :type if_none_match: str
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: An instance of AsyncLROPoller that returns either AgentPool or the result of
+         cls(response)
+        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.AgentPool]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    # Typing-only overload: pre-serialized IO[bytes] body, forwarded without model serialization.
+    @overload
+    async def begin_create_or_update(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        agent_pool_name: str,
+        parameters: IO[bytes],
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> AsyncLROPoller[_models.AgentPool]:
+        """Creates or updates an agent pool in the specified managed cluster.
+
+        Creates or updates an agent pool in the specified managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param agent_pool_name: The name of the agent pool. Required.
+        :type agent_pool_name: str
+        :param parameters: The agent pool to create or update. Required.
+        :type parameters: IO[bytes]
+        :param if_match: The request should only proceed if an entity matches this string. Default
+         value is None.
+        :type if_match: str
+        :param if_none_match: The request should only proceed if no entity matches this string. Default
+         value is None.
+        :type if_none_match: str
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: An instance of AsyncLROPoller that returns either AgentPool or the result of
+         cls(response)
+        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.AgentPool]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def begin_create_or_update(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        agent_pool_name: str,
+        parameters: Union[_models.AgentPool, IO[bytes]],
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
+        **kwargs: Any
+    ) -> AsyncLROPoller[_models.AgentPool]:
+        """Creates or updates an agent pool in the specified managed cluster.
+
+        Creates or updates an agent pool in the specified managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param agent_pool_name: The name of the agent pool. Required.
+        :type agent_pool_name: str
+        :param parameters: The agent pool to create or update. Is either a AgentPool type or a
+         IO[bytes] type. Required.
+        :type parameters: ~azure.mgmt.containerservice.models.AgentPool or IO[bytes]
+        :param if_match: The request should only proceed if an entity matches this string. Default
+         value is None.
+        :type if_match: str
+        :param if_none_match: The request should only proceed if no entity matches this string. Default
+         value is None.
+        :type if_none_match: str
+        :return: An instance of AsyncLROPoller that returns either AgentPool or the result of
+         cls(response)
+        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.AgentPool]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None)
+        # polling: True -> ARM polling, False -> no polling, or a caller-supplied polling method.
+        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+        # A continuation token resumes a previously started operation; no new initial request is sent.
+        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+        if cont_token is None:
+            raw_result = await self._create_or_update_initial(
+                resource_group_name=resource_group_name,
+                resource_name=resource_name,
+                agent_pool_name=agent_pool_name,
+                parameters=parameters,
+                if_match=if_match,
+                if_none_match=if_none_match,
+                api_version=api_version,
+                content_type=content_type,
+                # Identity hook: capture the raw PipelineResponse instead of a deserialized body.
+                cls=lambda x, y, z: x,
+                headers=_headers,
+                params=_params,
+                **kwargs
+            )
+            # Fully read the streamed body before polling starts.
+            await raw_result.http_response.read()  # type: ignore
+        # The initial call already consumed "error_map"; drop it before polling re-uses kwargs.
+        kwargs.pop("error_map", None)
+
+        def get_long_running_output(pipeline_response):
+            # Deserialize the final GET of the LRO into the typed model.
+            deserialized = self._deserialize("AgentPool", pipeline_response.http_response)
+            if cls:
+                return cls(pipeline_response, deserialized, {})  # type: ignore
+            return deserialized
+
+        if polling is True:
+            polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
+        elif polling is False:
+            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+        else:
+            polling_method = polling
+        if cont_token:
+            return AsyncLROPoller[_models.AgentPool].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return AsyncLROPoller[_models.AgentPool](
+            self._client, raw_result, get_long_running_output, polling_method  # type: ignore
+        )
+
+    async def _delete_initial(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        agent_pool_name: str,
+        ignore_pod_disruption_budget: Optional[bool] = None,
+        if_match: Optional[str] = None,
+        **kwargs: Any
+    ) -> AsyncIterator[bytes]:
+        """Send the initial DELETE of the agent-pool delete long-running operation.
+
+        Returns the streamed response body; ``begin_delete`` consumes it and hands the
+        raw pipeline response to the LRO poller.
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+        _request = build_delete_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            agent_pool_name=agent_pool_name,
+            subscription_id=self._config.subscription_id,
+            ignore_pod_disruption_budget=ignore_pod_disruption_budget,
+            if_match=if_match,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        # 202 = delete accepted (async), 204 = nothing to delete.
+        if response.status_code not in [202, 204]:
+            try:
+                await response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        response_headers = {}
+        if response.status_code == 202:
+            # Location header carries the URL the poller uses to track the async delete.
+            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace_async
+    async def begin_delete(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        agent_pool_name: str,
+        ignore_pod_disruption_budget: Optional[bool] = None,
+        if_match: Optional[str] = None,
+        **kwargs: Any
+    ) -> AsyncLROPoller[None]:
+        """Deletes an agent pool in the specified managed cluster.
+
+        Deletes an agent pool in the specified managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param agent_pool_name: The name of the agent pool. Required.
+        :type agent_pool_name: str
+        :param ignore_pod_disruption_budget: ignore-pod-disruption-budget=true to delete those pods on
+         a node without considering Pod Disruption Budget. Default value is None.
+        :type ignore_pod_disruption_budget: bool
+        :param if_match: The request should only proceed if an entity matches this string. Default
+         value is None.
+        :type if_match: str
+        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.AsyncLROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+        # A continuation token resumes a previously started operation; no new initial request is sent.
+        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+        if cont_token is None:
+            raw_result = await self._delete_initial(
+                resource_group_name=resource_group_name,
+                resource_name=resource_name,
+                agent_pool_name=agent_pool_name,
+                ignore_pod_disruption_budget=ignore_pod_disruption_budget,
+                if_match=if_match,
+                api_version=api_version,
+                # Identity hook: capture the raw PipelineResponse instead of a deserialized body.
+                cls=lambda x, y, z: x,
+                headers=_headers,
+                params=_params,
+                **kwargs
+            )
+            # Fully read the streamed body before polling starts.
+            await raw_result.http_response.read()  # type: ignore
+        kwargs.pop("error_map", None)
+
+        # Delete has no result body; only invoke the optional customization hook.
+        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
+            if cls:
+                return cls(pipeline_response, None, {})  # type: ignore
+
+        if polling is True:
+            polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
+        elif polling is False:
+            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+        else:
+            polling_method = polling
+        if cont_token:
+            return AsyncLROPoller[None].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
+
+    @distributed_trace_async
+    async def get_upgrade_profile(
+        self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
+    ) -> _models.AgentPoolUpgradeProfile:
+        """Gets the upgrade profile for an agent pool.
+
+        Gets the upgrade profile for an agent pool.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param agent_pool_name: The name of the agent pool. Required.
+        :type agent_pool_name: str
+        :return: AgentPoolUpgradeProfile or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.AgentPoolUpgradeProfile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        # Map common status codes to typed azure-core exceptions; extendable via "error_map".
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.AgentPoolUpgradeProfile] = kwargs.pop("cls", None)
+
+        _request = build_get_upgrade_profile_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            agent_pool_name=agent_pool_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        # Non-streaming GET: the body is buffered for deserialization below.
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("AgentPoolUpgradeProfile", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    async def _delete_machines_initial(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        agent_pool_name: str,
+        machines: Union[_models.AgentPoolDeleteMachinesParameter, IO[bytes]],
+        **kwargs: Any
+    ) -> AsyncIterator[bytes]:
+        """Send the initial POST of the delete-machines long-running operation.
+
+        Returns the streamed response body; ``begin_delete_machines`` consumes it and
+        hands the raw pipeline response to the LRO poller.
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+        content_type = content_type or "application/json"
+        _json = None
+        _content = None
+        # Raw byte/stream bodies are forwarded as-is; model instances are JSON-serialized.
+        if isinstance(machines, (IOBase, bytes)):
+            _content = machines
+        else:
+            _json = self._serialize.body(machines, "AgentPoolDeleteMachinesParameter")
+
+        _request = build_delete_machines_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            agent_pool_name=agent_pool_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            content_type=content_type,
+            json=_json,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        # 202 is the only success code: the deletion always proceeds asynchronously.
+        if response.status_code not in [202]:
+            try:
+                await response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        response_headers = {}
+        # Location header carries the URL the poller uses to track the async operation.
+        response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    # Typing-only overload: typed AgentPoolDeleteMachinesParameter model body.
+    @overload
+    async def begin_delete_machines(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        agent_pool_name: str,
+        machines: _models.AgentPoolDeleteMachinesParameter,
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> AsyncLROPoller[None]:
+        """Deletes specific machines in an agent pool.
+
+        Deletes specific machines in an agent pool.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param agent_pool_name: The name of the agent pool. Required.
+        :type agent_pool_name: str
+        :param machines: A list of machines from the agent pool to be deleted. Required.
+        :type machines: ~azure.mgmt.containerservice.models.AgentPoolDeleteMachinesParameter
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.AsyncLROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    # Typing-only overload: pre-serialized IO[bytes] body, forwarded without model serialization.
+    @overload
+    async def begin_delete_machines(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        agent_pool_name: str,
+        machines: IO[bytes],
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> AsyncLROPoller[None]:
+        """Deletes specific machines in an agent pool.
+
+        Deletes specific machines in an agent pool.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param agent_pool_name: The name of the agent pool. Required.
+        :type agent_pool_name: str
+        :param machines: A list of machines from the agent pool to be deleted. Required.
+        :type machines: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.AsyncLROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def begin_delete_machines(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        agent_pool_name: str,
+        machines: Union[_models.AgentPoolDeleteMachinesParameter, IO[bytes]],
+        **kwargs: Any
+    ) -> AsyncLROPoller[None]:
+        """Deletes specific machines in an agent pool.
+
+        Deletes specific machines in an agent pool.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param agent_pool_name: The name of the agent pool. Required.
+        :type agent_pool_name: str
+        :param machines: A list of machines from the agent pool to be deleted. Is either a
+         AgentPoolDeleteMachinesParameter type or a IO[bytes] type. Required.
+        :type machines: ~azure.mgmt.containerservice.models.AgentPoolDeleteMachinesParameter or
+         IO[bytes]
+        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.AsyncLROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+        # A continuation token resumes a previously started operation; no new initial request is sent.
+        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+        if cont_token is None:
+            raw_result = await self._delete_machines_initial(
+                resource_group_name=resource_group_name,
+                resource_name=resource_name,
+                agent_pool_name=agent_pool_name,
+                machines=machines,
+                api_version=api_version,
+                content_type=content_type,
+                # Identity hook: capture the raw PipelineResponse instead of a deserialized body.
+                cls=lambda x, y, z: x,
+                headers=_headers,
+                params=_params,
+                **kwargs
+            )
+            # Fully read the streamed body before polling starts.
+            await raw_result.http_response.read()  # type: ignore
+        kwargs.pop("error_map", None)
+
+        # The operation has no result body; only invoke the optional customization hook.
+        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
+            if cls:
+                return cls(pipeline_response, None, {})  # type: ignore
+
+        if polling is True:
+            polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
+        elif polling is False:
+            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+        else:
+            polling_method = polling
+        if cont_token:
+            return AsyncLROPoller[None].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
+
+    @distributed_trace_async
+    async def get_available_agent_pool_versions(
+        self, resource_group_name: str, resource_name: str, **kwargs: Any
+    ) -> _models.AgentPoolAvailableVersions:
+        """Gets a list of supported Kubernetes versions for the specified agent pool.
+
+        See `supported Kubernetes versions
+        <https://docs.microsoft.com/azure/aks/supported-kubernetes-versions>`_ for more details about
+        the version lifecycle.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :return: AgentPoolAvailableVersions or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.AgentPoolAvailableVersions
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        # Map common status codes to typed azure-core exceptions; extendable via "error_map".
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.AgentPoolAvailableVersions] = kwargs.pop("cls", None)
+
+        # Note: this endpoint is scoped to the managed cluster, not to a single agent pool.
+        _request = build_get_available_agent_pool_versions_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("AgentPoolAvailableVersions", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    async def _upgrade_node_image_version_initial(
+        self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
+    ) -> AsyncIterator[bytes]:
+        """Send the initial POST of the node-image-upgrade long-running operation.
+
+        Returns the streamed response body; ``begin_upgrade_node_image_version`` consumes
+        it and hands the raw pipeline response to the LRO poller.
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+        _request = build_upgrade_node_image_version_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            agent_pool_name=agent_pool_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        # 200 = completed synchronously, 202 = accepted for async processing.
+        if response.status_code not in [200, 202]:
+            try:
+                await response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        response_headers = {}
+        if response.status_code == 202:
+            # Azure-AsyncOperation header carries the status URL for the async upgrade.
+            response_headers["Azure-AsyncOperation"] = self._deserialize(
+                "str", response.headers.get("Azure-AsyncOperation")
+            )
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace_async
+    async def begin_upgrade_node_image_version(
+        self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
+    ) -> AsyncLROPoller[_models.AgentPool]:
+        """Upgrades the node image version of an agent pool to the latest.
+
+        Upgrading the node image version of an agent pool applies the newest OS and runtime updates to
+        the nodes. AKS provides one new image per week with the latest updates. For more details on
+        node image versions, see: https://docs.microsoft.com/azure/aks/node-image-upgrade.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param agent_pool_name: The name of the agent pool. Required.
+        :type agent_pool_name: str
+        :return: An instance of AsyncLROPoller that returns either AgentPool or the result of
+         cls(response)
+        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.AgentPool]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        # Annotation matches get_long_running_output, which deserializes an AgentPool
+        # (was ClsType[None], inconsistent with the declared AsyncLROPoller[AgentPool] return).
+        cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None)
+        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+        # A continuation token resumes a previously started operation; no new initial request is sent.
+        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+        if cont_token is None:
+            raw_result = await self._upgrade_node_image_version_initial(
+                resource_group_name=resource_group_name,
+                resource_name=resource_name,
+                agent_pool_name=agent_pool_name,
+                api_version=api_version,
+                # Identity hook: capture the raw PipelineResponse instead of a deserialized body.
+                cls=lambda x, y, z: x,
+                headers=_headers,
+                params=_params,
+                **kwargs
+            )
+            # Fully read the streamed body before polling starts.
+            await raw_result.http_response.read()  # type: ignore
+        kwargs.pop("error_map", None)
+
+        def get_long_running_output(pipeline_response):
+            # Surface the Azure-AsyncOperation tracking header alongside the final AgentPool.
+            response_headers = {}
+            response = pipeline_response.http_response
+            response_headers["Azure-AsyncOperation"] = self._deserialize(
+                "str", response.headers.get("Azure-AsyncOperation")
+            )
+
+            deserialized = self._deserialize("AgentPool", pipeline_response.http_response)
+            if cls:
+                return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+            return deserialized
+
+        if polling is True:
+            # Final resource state is fetched from the Location URL for this operation.
+            polling_method: AsyncPollingMethod = cast(
+                AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+            )
+        elif polling is False:
+            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+        else:
+            polling_method = polling
+        if cont_token:
+            return AsyncLROPoller[_models.AgentPool].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return AsyncLROPoller[_models.AgentPool](
+            self._client, raw_result, get_long_running_output, polling_method  # type: ignore
+        )
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_machines_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_machines_operations.py
new file mode 100644
index 00000000000..ed5c1c39eb3
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_machines_operations.py
@@ -0,0 +1,217 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from typing import Any, Callable, Optional, TypeVar
+import urllib.parse
+
+from azure.core import AsyncPipelineClient
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from ... import models as _models
+from ..._utils.serialization import Deserializer, Serializer
+from ...operations._machines_operations import build_get_request, build_list_request
+from .._configuration import ContainerServiceClientConfiguration
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+
+class MachinesOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s
+        :attr:`machines` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        # Generated plumbing: the pipeline client, configuration, serializer and
+        # deserializer are supplied by the service client either positionally or
+        # by keyword; positional arguments take precedence when present.
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def list(
+        self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
+    ) -> AsyncItemPaged["_models.Machine"]:
+        """Gets a list of machines in the specified agent pool.
+
+        Gets a list of machines in the specified agent pool.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param agent_pool_name: The name of the agent pool. Required.
+        :type agent_pool_name: str
+        :return: An iterator like instance of either Machine or the result of cls(response)
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.Machine]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.MachineListResult] = kwargs.pop("cls", None)
+
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        def prepare_request(next_link=None):
+            # First page: build the canonical list request. Subsequent pages:
+            # re-issue the service-provided next link, forcing this client's
+            # api-version onto the query string.
+            if not next_link:
+
+                _request = build_list_request(
+                    resource_group_name=resource_group_name,
+                    resource_name=resource_name,
+                    agent_pool_name=agent_pool_name,
+                    subscription_id=self._config.subscription_id,
+                    api_version=api_version,
+                    headers=_headers,
+                    params=_params,
+                )
+                _request.url = self._client.format_url(_request.url)
+
+            else:
+                # make call to next link with the client's api-version
+                _parsed_next_link = urllib.parse.urlparse(next_link)
+                _next_request_params = case_insensitive_dict(
+                    {
+                        key: [urllib.parse.quote(v) for v in value]
+                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+                    }
+                )
+                _next_request_params["api-version"] = self._config.api_version
+                _request = HttpRequest(
+                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+                )
+                _request.url = self._client.format_url(_request.url)
+                _request.method = "GET"
+            return _request
+
+        async def extract_data(pipeline_response):
+            # Deserialize one page and return (continuation link or None, items).
+            deserialized = self._deserialize("MachineListResult", pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)  # type: ignore
+            return deserialized.next_link or None, AsyncList(list_of_elem)
+
+        async def get_next(next_link=None):
+            # Fetch one page; only HTTP 200 is accepted, everything else maps to
+            # an ARM-formatted HttpResponseError.
+            _request = prepare_request(next_link)
+
+            _stream = False
+            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+                _request, stream=_stream, **kwargs
+            )
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                error = self._deserialize.failsafe_deserialize(
+                    _models.ErrorResponse,
+                    pipeline_response,
+                )
+                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        return AsyncItemPaged(get_next, extract_data)
+
+    @distributed_trace_async
+    async def get(
+        self, resource_group_name: str, resource_name: str, agent_pool_name: str, machine_name: str, **kwargs: Any
+    ) -> _models.Machine:
+        """Get a specific machine in the specified agent pool.
+
+        Get a specific machine in the specified agent pool.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param agent_pool_name: The name of the agent pool. Required.
+        :type agent_pool_name: str
+        :param machine_name: host name of the machine. Required.
+        :type machine_name: str
+        :return: Machine or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.Machine
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.Machine] = kwargs.pop("cls", None)
+
+        _request = build_get_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            agent_pool_name=agent_pool_name,
+            machine_name=machine_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("Machine", pipeline_response.http_response)
+
+        if cls:
+            # Custom response hook supplied by the caller via the `cls` kwarg.
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_maintenance_configurations_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_maintenance_configurations_operations.py
new file mode 100644
index 00000000000..e11d52fc87f
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_maintenance_configurations_operations.py
@@ -0,0 +1,434 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from io import IOBase
+from typing import Any, Callable, IO, Optional, TypeVar, Union, overload
+import urllib.parse
+
+from azure.core import AsyncPipelineClient
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from ... import models as _models
+from ..._utils.serialization import Deserializer, Serializer
+from ...operations._maintenance_configurations_operations import (
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_request,
+ build_list_by_managed_cluster_request,
+)
+from .._configuration import ContainerServiceClientConfiguration
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+
+class MaintenanceConfigurationsOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s
+        :attr:`maintenance_configurations` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        # Generated plumbing: the pipeline client, configuration, serializer and
+        # deserializer are supplied by the service client either positionally or
+        # by keyword; positional arguments take precedence when present.
+        input_args = list(args)
+        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def list_by_managed_cluster(
+        self, resource_group_name: str, resource_name: str, **kwargs: Any
+    ) -> AsyncItemPaged["_models.MaintenanceConfiguration"]:
+        """Gets a list of maintenance configurations in the specified managed cluster.
+
+        Gets a list of maintenance configurations in the specified managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :return: An iterator like instance of either MaintenanceConfiguration or the result of
+         cls(response)
+        :rtype:
+         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.MaintenanceConfiguration]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.MaintenanceConfigurationListResult] = kwargs.pop("cls", None)
+
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        def prepare_request(next_link=None):
+            # First page: build the canonical list request. Subsequent pages:
+            # re-issue the service-provided next link, forcing this client's
+            # api-version onto the query string.
+            if not next_link:
+
+                _request = build_list_by_managed_cluster_request(
+                    resource_group_name=resource_group_name,
+                    resource_name=resource_name,
+                    subscription_id=self._config.subscription_id,
+                    api_version=api_version,
+                    headers=_headers,
+                    params=_params,
+                )
+                _request.url = self._client.format_url(_request.url)
+
+            else:
+                # make call to next link with the client's api-version
+                _parsed_next_link = urllib.parse.urlparse(next_link)
+                _next_request_params = case_insensitive_dict(
+                    {
+                        key: [urllib.parse.quote(v) for v in value]
+                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+                    }
+                )
+                _next_request_params["api-version"] = self._config.api_version
+                _request = HttpRequest(
+                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+                )
+                _request.url = self._client.format_url(_request.url)
+                _request.method = "GET"
+            return _request
+
+        async def extract_data(pipeline_response):
+            # Deserialize one page and return (continuation link or None, items).
+            deserialized = self._deserialize("MaintenanceConfigurationListResult", pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)  # type: ignore
+            return deserialized.next_link or None, AsyncList(list_of_elem)
+
+        async def get_next(next_link=None):
+            # Fetch one page; only HTTP 200 is accepted, everything else maps to
+            # an ARM-formatted HttpResponseError.
+            _request = prepare_request(next_link)
+
+            _stream = False
+            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+                _request, stream=_stream, **kwargs
+            )
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                error = self._deserialize.failsafe_deserialize(
+                    _models.ErrorResponse,
+                    pipeline_response,
+                )
+                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        return AsyncItemPaged(get_next, extract_data)
+
+    @distributed_trace_async
+    async def get(
+        self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any
+    ) -> _models.MaintenanceConfiguration:
+        """Gets the specified maintenance configuration of a managed cluster.
+
+        Gets the specified maintenance configuration of a managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param config_name: The name of the maintenance configuration. Supported values are 'default',
+         'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required.
+        :type config_name: str
+        :return: MaintenanceConfiguration or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.MaintenanceConfiguration
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.MaintenanceConfiguration] = kwargs.pop("cls", None)
+
+        _request = build_get_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            config_name=config_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response.http_response)
+
+        if cls:
+            # Custom response hook supplied by the caller via the `cls` kwarg.
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @overload
+    async def create_or_update(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        config_name: str,
+        parameters: _models.MaintenanceConfiguration,
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> _models.MaintenanceConfiguration:
+        """Creates or updates a maintenance configuration in the specified managed cluster.
+
+        Creates or updates a maintenance configuration in the specified managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param config_name: The name of the maintenance configuration. Supported values are 'default',
+         'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required.
+        :type config_name: str
+        :param parameters: The maintenance configuration to create or update. Required.
+        :type parameters: ~azure.mgmt.containerservice.models.MaintenanceConfiguration
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: MaintenanceConfiguration or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.MaintenanceConfiguration
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_or_update(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        config_name: str,
+        parameters: IO[bytes],
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> _models.MaintenanceConfiguration:
+        """Creates or updates a maintenance configuration in the specified managed cluster.
+
+        Creates or updates a maintenance configuration in the specified managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param config_name: The name of the maintenance configuration. Supported values are 'default',
+         'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required.
+        :type config_name: str
+        :param parameters: The maintenance configuration to create or update. Required.
+        :type parameters: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: MaintenanceConfiguration or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.MaintenanceConfiguration
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def create_or_update(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        config_name: str,
+        parameters: Union[_models.MaintenanceConfiguration, IO[bytes]],
+        **kwargs: Any
+    ) -> _models.MaintenanceConfiguration:
+        """Creates or updates a maintenance configuration in the specified managed cluster.
+
+        Creates or updates a maintenance configuration in the specified managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param config_name: The name of the maintenance configuration. Supported values are 'default',
+         'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required.
+        :type config_name: str
+        :param parameters: The maintenance configuration to create or update. Is either a
+         MaintenanceConfiguration type or a IO[bytes] type. Required.
+        :type parameters: ~azure.mgmt.containerservice.models.MaintenanceConfiguration or IO[bytes]
+        :return: MaintenanceConfiguration or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.MaintenanceConfiguration
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models.MaintenanceConfiguration] = kwargs.pop("cls", None)
+
+        content_type = content_type or "application/json"
+        _json = None
+        _content = None
+        # Raw streams/bytes are sent as-is; model objects are serialized to JSON.
+        if isinstance(parameters, (IOBase, bytes)):
+            _content = parameters
+        else:
+            _json = self._serialize.body(parameters, "MaintenanceConfiguration")
+
+        _request = build_create_or_update_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            config_name=config_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            content_type=content_type,
+            json=_json,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        # 200 = updated, 201 = created; anything else is an ARM error.
+        if response.status_code not in [200, 201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response.http_response)
+
+        if cls:
+            # Custom response hook supplied by the caller via the `cls` kwarg.
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace_async
+    async def delete(self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any) -> None:
+        """Deletes a maintenance configuration.
+
+        Deletes a maintenance configuration.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param config_name: The name of the maintenance configuration. Supported values are 'default',
+         'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required.
+        :type config_name: str
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_delete_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            config_name=config_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        # 200 = deleted, 204 = nothing to delete; both are success for DELETE.
+        if response.status_code not in [200, 204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        if cls:
+            return cls(pipeline_response, None, {})  # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_managed_clusters_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_managed_clusters_operations.py
new file mode 100644
index 00000000000..69e06849da8
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_managed_clusters_operations.py
@@ -0,0 +1,2969 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from io import IOBase
+from typing import Any, AsyncIterator, Callable, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core import AsyncPipelineClient
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ..._utils.serialization import Deserializer, Serializer
+from ...operations._managed_clusters_operations import (
+ build_abort_latest_operation_request,
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_access_profile_request,
+ build_get_command_result_request,
+ build_get_mesh_revision_profile_request,
+ build_get_mesh_upgrade_profile_request,
+ build_get_request,
+ build_get_upgrade_profile_request,
+ build_list_by_resource_group_request,
+ build_list_cluster_admin_credentials_request,
+ build_list_cluster_monitoring_user_credentials_request,
+ build_list_cluster_user_credentials_request,
+ build_list_kubernetes_versions_request,
+ build_list_mesh_revision_profiles_request,
+ build_list_mesh_upgrade_profiles_request,
+ build_list_outbound_network_dependencies_endpoints_request,
+ build_list_request,
+ build_reset_aad_profile_request,
+ build_reset_service_principal_profile_request,
+ build_rotate_cluster_certificates_request,
+ build_rotate_service_account_signing_keys_request,
+ build_run_command_request,
+ build_start_request,
+ build_stop_request,
+ build_update_tags_request,
+)
+from .._configuration import ContainerServiceClientConfiguration
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+
+class ManagedClustersOperations: # pylint: disable=too-many-public-methods
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s
+ :attr:`managed_clusters` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace_async
+ async def list_kubernetes_versions(self, location: str, **kwargs: Any) -> _models.KubernetesVersionListResult:
+ """Gets a list of supported Kubernetes versions in the specified subscription.
+
+ Contains extra metadata on the version, including supported patch versions, capabilities,
+ available upgrades, and details on preview status of the version.
+
+ :param location: The name of the Azure region. Required.
+ :type location: str
+ :return: KubernetesVersionListResult or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.KubernetesVersionListResult
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.KubernetesVersionListResult] = kwargs.pop("cls", None)
+
+ _request = build_list_kubernetes_versions_request(
+ location=location,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("KubernetesVersionListResult", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace
+ def list(self, **kwargs: Any) -> AsyncItemPaged["_models.ManagedCluster"]:
+ """Gets a list of managed clusters in the specified subscription.
+
+ Gets a list of managed clusters in the specified subscription.
+
+ :return: An iterator like instance of either ManagedCluster or the result of cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.ManagedCluster]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.ManagedClusterListResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ @distributed_trace
+ def list_by_resource_group(
+ self, resource_group_name: str, **kwargs: Any
+ ) -> AsyncItemPaged["_models.ManagedCluster"]:
+ """Lists managed clusters in the specified subscription and resource group.
+
+ Lists managed clusters in the specified subscription and resource group.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :return: An iterator like instance of either ManagedCluster or the result of cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.ManagedCluster]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.ManagedClusterListResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_by_resource_group_request(
+ resource_group_name=resource_group_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ @distributed_trace_async
+ async def get_upgrade_profile(
+ self, resource_group_name: str, resource_name: str, **kwargs: Any
+ ) -> _models.ManagedClusterUpgradeProfile:
+ """Gets the upgrade profile of a managed cluster.
+
+ Gets the upgrade profile of a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :return: ManagedClusterUpgradeProfile or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.ManagedClusterUpgradeProfile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.ManagedClusterUpgradeProfile] = kwargs.pop("cls", None)
+
+ _request = build_get_upgrade_profile_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("ManagedClusterUpgradeProfile", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def get_access_profile(
+ self, resource_group_name: str, resource_name: str, role_name: str, **kwargs: Any
+ ) -> _models.ManagedClusterAccessProfile:
+ """Gets an access profile of a managed cluster.
+
+        **WARNING**\\ : This API will be deprecated. Instead use `ListClusterUserCredentials
+        <https://docs.microsoft.com/rest/api/aks/managedclusters/listclusterusercredentials>`_ or
+        `ListClusterAdminCredentials
+        <https://docs.microsoft.com/rest/api/aks/managedclusters/listclusteradmincredentials>`_ .
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param role_name: The name of the role for managed cluster accessProfile resource. Required.
+ :type role_name: str
+ :return: ManagedClusterAccessProfile or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.ManagedClusterAccessProfile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.ManagedClusterAccessProfile] = kwargs.pop("cls", None)
+
+ _request = build_get_access_profile_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ role_name=role_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("ManagedClusterAccessProfile", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def list_cluster_admin_credentials(
+ self, resource_group_name: str, resource_name: str, server_fqdn: Optional[str] = None, **kwargs: Any
+ ) -> _models.CredentialResults:
+ """Lists the admin credentials of a managed cluster.
+
+ Lists the admin credentials of a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param server_fqdn: server fqdn type for credentials to be returned. Default value is None.
+ :type server_fqdn: str
+ :return: CredentialResults or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.CredentialResults
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None)
+
+ _request = build_list_cluster_admin_credentials_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ server_fqdn=server_fqdn,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("CredentialResults", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def list_cluster_user_credentials(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ server_fqdn: Optional[str] = None,
+ format: Optional[Union[str, _models.Format]] = None,
+ **kwargs: Any
+ ) -> _models.CredentialResults:
+ """Lists the user credentials of a managed cluster.
+
+ Lists the user credentials of a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param server_fqdn: server fqdn type for credentials to be returned. Default value is None.
+ :type server_fqdn: str
+        :param format: Only apply to AAD clusters, specifies the format of returned kubeconfig. Format
+        'azure' will return azure auth-provider kubeconfig; format 'exec' will return exec format
+        kubeconfig, which requires kubelogin binary in the path. Known values are: "azure" and
+        "exec". Default value is None.
+ :type format: str or ~azure.mgmt.containerservice.models.Format
+ :return: CredentialResults or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.CredentialResults
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None)
+
+ _request = build_list_cluster_user_credentials_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ server_fqdn=server_fqdn,
+ format=format,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("CredentialResults", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def list_cluster_monitoring_user_credentials(
+ self, resource_group_name: str, resource_name: str, server_fqdn: Optional[str] = None, **kwargs: Any
+ ) -> _models.CredentialResults:
+ """Lists the cluster monitoring user credentials of a managed cluster.
+
+ Lists the cluster monitoring user credentials of a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param server_fqdn: server fqdn type for credentials to be returned. Default value is None.
+ :type server_fqdn: str
+ :return: CredentialResults or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.CredentialResults
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None)
+
+ _request = build_list_cluster_monitoring_user_credentials_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ server_fqdn=server_fqdn,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("CredentialResults", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _models.ManagedCluster:
+ """Gets a managed cluster.
+
+ Gets a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :return: ManagedCluster or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.ManagedCluster
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("ManagedCluster", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ async def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.ManagedCluster, IO[bytes]],
+ if_match: Optional[str] = None,
+ if_none_match: Optional[str] = None,
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(parameters, (IOBase, bytes)):
+ _content = parameters
+ else:
+ _json = self._serialize.body(parameters, "ManagedCluster")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ if_match=if_match,
+ if_none_match=if_none_match,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: _models.ManagedCluster,
+ if_match: Optional[str] = None,
+ if_none_match: Optional[str] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ManagedCluster]:
+ """Creates or updates a managed cluster.
+
+ Creates or updates a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The managed cluster to create or update. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.ManagedCluster
+ :param if_match: The request should only proceed if an entity matches this string. Default
+ value is None.
+ :type if_match: str
+ :param if_none_match: The request should only proceed if no entity matches this string. Default
+ value is None.
+ :type if_none_match: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedCluster]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: IO[bytes],
+ if_match: Optional[str] = None,
+ if_none_match: Optional[str] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ManagedCluster]:
+ """Creates or updates a managed cluster.
+
+ Creates or updates a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The managed cluster to create or update. Required.
+ :type parameters: IO[bytes]
+ :param if_match: The request should only proceed if an entity matches this string. Default
+ value is None.
+ :type if_match: str
+ :param if_none_match: The request should only proceed if no entity matches this string. Default
+ value is None.
+ :type if_none_match: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedCluster]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ # Implementation behind the two typed overloads: starts the create/update
+ # long-running operation and wraps it in an AsyncLROPoller.
+ @distributed_trace_async
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.ManagedCluster, IO[bytes]],
+ if_match: Optional[str] = None,
+ if_none_match: Optional[str] = None,
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ManagedCluster]:
+ """Creates or updates a managed cluster.
+
+ Creates or updates a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The managed cluster to create or update. Is either a ManagedCluster type or
+ a IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.ManagedCluster or IO[bytes]
+ :param if_match: The request should only proceed if an entity matches this string. Default
+ value is None.
+ :type if_match: str
+ :param if_none_match: The request should only proceed if no entity matches this string. Default
+ value is None.
+ :type if_none_match: str
+ :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedCluster]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ # No continuation token: issue the initial request. The identity
+ # ``cls`` lambda keeps the raw pipeline response so the poller can
+ # inspect the operation-status headers on it.
+ raw_result = await self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ parameters=parameters,
+ if_match=if_match,
+ if_none_match=if_none_match,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ # Drain the streamed body so the connection is released before polling.
+ await raw_result.http_response.read() # type: ignore
+ # error_map was consumed by the initial call; don't forward it to polling.
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ # Deserialize the terminal response into the ManagedCluster model,
+ # or hand everything to the caller-supplied ``cls`` hook.
+ deserialized = self._deserialize("ManagedCluster", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ # polling: True -> default ARM polling; False -> no polling; otherwise a
+ # caller-supplied AsyncPollingMethod instance.
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ # Rehydrate a previously started LRO from its continuation token.
+ return AsyncLROPoller[_models.ManagedCluster].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[_models.ManagedCluster](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
+
+ # Sends the initial request of the update-tags LRO and returns the
+ # streamed response body; ``begin_update_tags`` wraps this in a poller.
+ async def _update_tags_initial(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.TagsObject, IO[bytes]],
+ if_match: Optional[str] = None,
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ # Raw byte/stream payloads are sent as-is; model instances are serialized.
+ if isinstance(parameters, (IOBase, bytes)):
+ _content = parameters
+ else:
+ _json = self._serialize.body(parameters, "TagsObject")
+
+ _request = build_update_tags_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ if_match=if_match,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ # Stream the response; the poller decides when to consume it.
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ # Overload: tags supplied as a TagsObject model instance.
+ @overload
+ async def begin_update_tags(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: _models.TagsObject,
+ if_match: Optional[str] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ManagedCluster]:
+ """Updates tags on a managed cluster.
+
+ Updates tags on a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.TagsObject
+ :param if_match: The request should only proceed if an entity matches this string. Default
+ value is None.
+ :type if_match: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedCluster]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ # Overload: tags supplied as a raw binary stream (IO[bytes]).
+ @overload
+ async def begin_update_tags(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: IO[bytes],
+ if_match: Optional[str] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ManagedCluster]:
+ """Updates tags on a managed cluster.
+
+ Updates tags on a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Required.
+ :type parameters: IO[bytes]
+ :param if_match: The request should only proceed if an entity matches this string. Default
+ value is None.
+ :type if_match: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedCluster]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ # Implementation behind the two typed overloads: starts the update-tags
+ # long-running operation and wraps it in an AsyncLROPoller.
+ @distributed_trace_async
+ async def begin_update_tags(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.TagsObject, IO[bytes]],
+ if_match: Optional[str] = None,
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.ManagedCluster]:
+ """Updates tags on a managed cluster.
+
+ Updates tags on a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Is either
+ a TagsObject type or a IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.TagsObject or IO[bytes]
+ :param if_match: The request should only proceed if an entity matches this string. Default
+ value is None.
+ :type if_match: str
+ :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedCluster]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ # Identity ``cls`` keeps the raw pipeline response for the poller.
+ raw_result = await self._update_tags_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ parameters=parameters,
+ if_match=if_match,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ # Drain the streamed body so the connection is released before polling.
+ await raw_result.http_response.read() # type: ignore
+ # error_map was consumed by the initial call; don't forward it to polling.
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ # Deserialize the terminal response into a ManagedCluster model.
+ deserialized = self._deserialize("ManagedCluster", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ # Rehydrate a previously started LRO from its continuation token.
+ return AsyncLROPoller[_models.ManagedCluster].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[_models.ManagedCluster](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
+
+ # Sends the initial DELETE of the delete-cluster LRO and returns the
+ # streamed response body; ``begin_delete`` wraps this in a poller.
+ async def _delete_initial(
+ self, resource_group_name: str, resource_name: str, if_match: Optional[str] = None, **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ if_match=if_match,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ # Stream the response; the poller decides when to consume it.
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ # 202 = deletion accepted (async); 204 = nothing to delete.
+ if response.status_code not in [202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ # Location header carries the status-monitor URL for the LRO.
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def begin_delete(
+ self, resource_group_name: str, resource_name: str, if_match: Optional[str] = None, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Deletes a managed cluster.
+
+ Deletes a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param if_match: The request should only proceed if an entity matches this string. Default
+ value is None.
+ :type if_match: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ # Identity ``cls`` keeps the raw pipeline response for the poller.
+ raw_result = await self._delete_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ if_match=if_match,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ # Drain the streamed body so the connection is released before polling.
+ await raw_result.http_response.read() # type: ignore
+ # error_map was consumed by the initial call; don't forward it to polling.
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ # Delete has no response body; only the caller-supplied hook runs.
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ # Rehydrate a previously started LRO from its continuation token.
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ # Sends the initial request of the reset-service-principal-profile LRO and
+ # returns the streamed response body; wrapped by
+ # ``begin_reset_service_principal_profile``.
+ async def _reset_service_principal_profile_initial(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.ManagedClusterServicePrincipalProfile, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ # Raw byte/stream payloads are sent as-is; model instances are serialized.
+ if isinstance(parameters, (IOBase, bytes)):
+ _content = parameters
+ else:
+ _json = self._serialize.body(parameters, "ManagedClusterServicePrincipalProfile")
+
+ _request = build_reset_service_principal_profile_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ # Stream the response; the poller decides when to consume it.
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ # Location header carries the status-monitor URL for the LRO.
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ # Overload: profile supplied as a ManagedClusterServicePrincipalProfile model.
+ @overload
+ async def begin_reset_service_principal_profile(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: _models.ManagedClusterServicePrincipalProfile,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Reset the Service Principal Profile of a managed cluster.
+
+ This action cannot be performed on a cluster that is not using a service principal.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The service principal profile to set on the managed cluster. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.ManagedClusterServicePrincipalProfile
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ # Overload: profile supplied as a raw binary stream (IO[bytes]).
+ @overload
+ async def begin_reset_service_principal_profile(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Reset the Service Principal Profile of a managed cluster.
+
+ This action cannot be performed on a cluster that is not using a service principal.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The service principal profile to set on the managed cluster. Required.
+ :type parameters: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ # Implementation behind the two typed overloads: starts the
+ # reset-service-principal-profile LRO and wraps it in an AsyncLROPoller.
+ @distributed_trace_async
+ async def begin_reset_service_principal_profile(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.ManagedClusterServicePrincipalProfile, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Reset the Service Principal Profile of a managed cluster.
+
+ This action cannot be performed on a cluster that is not using a service principal.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The service principal profile to set on the managed cluster. Is either a
+ ManagedClusterServicePrincipalProfile type or a IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.ManagedClusterServicePrincipalProfile or
+ IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ # Identity ``cls`` keeps the raw pipeline response for the poller.
+ raw_result = await self._reset_service_principal_profile_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ parameters=parameters,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ # Drain the streamed body so the connection is released before polling.
+ await raw_result.http_response.read() # type: ignore
+ # error_map was consumed by the initial call; don't forward it to polling.
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ # Operation returns no body; only the caller-supplied hook runs.
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ # Final state is resolved via the Location header for this operation.
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ # Rehydrate a previously started LRO from its continuation token.
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ # Sends the initial request of the reset-AAD-profile LRO and returns the
+ # streamed response body; wrapped by ``begin_reset_aad_profile``.
+ async def _reset_aad_profile_initial(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.ManagedClusterAADProfile, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ # Raw byte/stream payloads are sent as-is; model instances are serialized.
+ if isinstance(parameters, (IOBase, bytes)):
+ _content = parameters
+ else:
+ _json = self._serialize.body(parameters, "ManagedClusterAADProfile")
+
+ _request = build_reset_aad_profile_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ # Stream the response; the poller decides when to consume it.
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ # Location header carries the status-monitor URL for the LRO.
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ # Overload: AAD profile supplied as a ManagedClusterAADProfile model.
+ @overload
+ async def begin_reset_aad_profile(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: _models.ManagedClusterAADProfile,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Reset the AAD Profile of a managed cluster.
+
+ **WARNING**\\ : This API will be deprecated. Please see `AKS-managed Azure Active Directory
+ integration `_ to update your cluster with AKS-managed Azure
+ AD.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The AAD profile to set on the Managed Cluster. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.ManagedClusterAADProfile
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ # Overload: AAD profile supplied as a raw binary stream (IO[bytes]).
+ @overload
+ async def begin_reset_aad_profile(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Reset the AAD Profile of a managed cluster.
+
+ **WARNING**\\ : This API will be deprecated. Please see `AKS-managed Azure Active Directory
+ integration `_ to update your cluster with AKS-managed Azure
+ AD.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The AAD profile to set on the Managed Cluster. Required.
+ :type parameters: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ # Implementation behind the two typed overloads: starts the
+ # reset-AAD-profile LRO and wraps it in an AsyncLROPoller.
+ @distributed_trace_async
+ async def begin_reset_aad_profile(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.ManagedClusterAADProfile, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Reset the AAD Profile of a managed cluster.
+
+ **WARNING**\\ : This API will be deprecated. Please see `AKS-managed Azure Active Directory
+ integration `_ to update your cluster with AKS-managed Azure
+ AD.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The AAD profile to set on the Managed Cluster. Is either a
+ ManagedClusterAADProfile type or a IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.ManagedClusterAADProfile or IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ # Identity ``cls`` keeps the raw pipeline response for the poller.
+ raw_result = await self._reset_aad_profile_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ parameters=parameters,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ # Drain the streamed body so the connection is released before polling.
+ await raw_result.http_response.read() # type: ignore
+ # error_map was consumed by the initial call; don't forward it to polling.
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ # Operation returns no body; only the caller-supplied hook runs.
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ # Final state is resolved via the Location header for this operation.
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ # Rehydrate a previously started LRO from its continuation token.
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ # Sends the initial request of the rotate-cluster-certificates LRO and
+ # returns the streamed response body; wrapped by
+ # ``begin_rotate_cluster_certificates``.
+ async def _rotate_cluster_certificates_initial(
+ self, resource_group_name: str, resource_name: str, **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_rotate_cluster_certificates_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ # Stream the response; the poller decides when to consume it.
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ # Location header carries the status-monitor URL for the LRO.
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def begin_rotate_cluster_certificates(
+ self, resource_group_name: str, resource_name: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Rotates the certificates of a managed cluster.
+
+ See `Certificate rotation `_ for
+ more details about rotating managed cluster certificates.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ # Only issue the initial request when not resuming from a continuation token.
+ if cont_token is None:
+ raw_result = await self._rotate_cluster_certificates_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ # The operation has no response body; only invoke the caller's cls hook.
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ # polling=True -> ARM polling via the Location header; False -> no polling;
+ # anything else is treated as a caller-supplied polling method.
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ async def _abort_latest_operation_initial(
+ self, resource_group_name: str, resource_name: str, **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ # Send the initial POST that requests cancellation of the cluster's running
+ # operation; polling to completion is handled by begin_abort_latest_operation.
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_abort_latest_operation_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ # Always stream so the LRO poller can consume the body lazily.
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ # Anything other than 202/204 is surfaced as an ARM error response.
+ if response.status_code not in [202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ # NOTE(review): this method records a lowercase "location" key and also
+ # captures Azure-AsyncOperation, unlike sibling _initial methods that use
+ # "Location" only — presumably per the service swagger; generated code.
+ response_headers["location"] = self._deserialize("str", response.headers.get("location"))
+ response_headers["Azure-AsyncOperation"] = self._deserialize(
+ "str", response.headers.get("Azure-AsyncOperation")
+ )
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def begin_abort_latest_operation(
+ self, resource_group_name: str, resource_name: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Aborts last operation running on managed cluster.
+
+ Aborts the currently running operation on the managed cluster. The Managed Cluster will be
+ moved to a Canceling state and eventually to a Canceled state when cancellation finishes. If
+ the operation completes before cancellation can take place, a 409 error code is returned.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ # Only issue the initial request when not resuming from a continuation token.
+ if cont_token is None:
+ raw_result = await self._abort_latest_operation_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ # The operation has no response body; only invoke the caller's cls hook.
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ # polling=True -> ARM polling via the Location header; False -> no polling;
+ # anything else is treated as a caller-supplied polling method.
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ async def _rotate_service_account_signing_keys_initial( # pylint: disable=name-too-long
+ self, resource_group_name: str, resource_name: str, **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ # Send the initial POST of the service-account signing-key rotation LRO;
+ # polling to completion is handled by begin_rotate_service_account_signing_keys.
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_rotate_service_account_signing_keys_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ # Always stream so the LRO poller can consume the body lazily.
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ # Anything other than 202/204 is surfaced as an ARM error response.
+ if response.status_code not in [202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ # A 202 carries the Location header later used for final-state polling.
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def begin_rotate_service_account_signing_keys( # pylint: disable=name-too-long
+ self, resource_group_name: str, resource_name: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Rotates the service account signing keys of a managed cluster.
+
+ Rotates the service account signing keys of a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ # Only issue the initial request when not resuming from a continuation token.
+ if cont_token is None:
+ raw_result = await self._rotate_service_account_signing_keys_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ # The operation has no response body; only invoke the caller's cls hook.
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ # polling=True -> ARM polling via the Location header; False -> no polling;
+ # anything else is treated as a caller-supplied polling method.
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ async def _stop_initial(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncIterator[bytes]:
+ # Send the initial POST that stops the managed cluster; polling to
+ # completion is handled by begin_stop.
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_stop_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ # Always stream so the LRO poller can consume the body lazily.
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ # Anything other than 202/204 is surfaced as an ARM error response.
+ if response.status_code not in [202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ # A 202 carries the Location header later used for final-state polling.
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def begin_stop(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
+ """Stops a Managed Cluster.
+
+ This can only be performed on Azure Virtual Machine Scale set backed clusters. Stopping a
+ cluster stops the control plane and agent nodes entirely, while maintaining all object and
+ cluster state. A cluster does not accrue charges while it is stopped. See `stopping a cluster
+ `_ for more details about stopping a
+ cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ # Only issue the initial request when not resuming from a continuation token.
+ if cont_token is None:
+ raw_result = await self._stop_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ # The operation has no response body; only invoke the caller's cls hook.
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ # polling=True -> ARM polling via the Location header; False -> no polling;
+ # anything else is treated as a caller-supplied polling method.
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ async def _start_initial(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncIterator[bytes]:
+ # Send the initial POST that starts a previously stopped managed cluster;
+ # polling to completion is handled by begin_start.
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_start_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ # Always stream so the LRO poller can consume the body lazily.
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ # Anything other than 202/204 is surfaced as an ARM error response.
+ if response.status_code not in [202, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ # A 202 carries the Location header later used for final-state polling.
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def begin_start(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
+ """Starts a previously stopped Managed Cluster.
+
+ See `starting a cluster `_ for more
+ details about starting a cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ # Only issue the initial request when not resuming from a continuation token.
+ if cont_token is None:
+ raw_result = await self._start_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ # The operation has no response body; only invoke the caller's cls hook.
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ # polling=True -> ARM polling via the Location header; False -> no polling;
+ # anything else is treated as a caller-supplied polling method.
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ async def _run_command_initial(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ request_payload: Union[_models.RunCommandRequest, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ # Send the initial POST of the run-command LRO, serializing the payload as
+ # JSON (model) or passing it through raw (bytes/stream); polling to
+ # completion is handled by begin_run_command.
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ # Raw bytes/streams go on the wire unchanged; models are serialized to JSON.
+ if isinstance(request_payload, (IOBase, bytes)):
+ _content = request_payload
+ else:
+ _json = self._serialize.body(request_payload, "RunCommandRequest")
+
+ _request = build_run_command_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ # Always stream so the LRO poller can consume the body lazily.
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ # 200 = command finished synchronously; 202 = accepted for async execution.
+ if response.status_code not in [200, 202]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def begin_run_command(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ request_payload: _models.RunCommandRequest,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.RunCommandResult]:
+ """Submits a command to run against the Managed Cluster.
+
+ AKS will create a pod to run the command. This is primarily useful for private clusters. For
+ more information see `AKS Run Command
+ `_.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param request_payload: The run command request. Required.
+ :type request_payload: ~azure.mgmt.containerservice.models.RunCommandRequest
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either RunCommandResult or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.RunCommandResult]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ # Typing overload (model payload) — no runtime body; see the implementation below.
+
+ @overload
+ async def begin_run_command(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ request_payload: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.RunCommandResult]:
+ """Submits a command to run against the Managed Cluster.
+
+ AKS will create a pod to run the command. This is primarily useful for private clusters. For
+ more information see `AKS Run Command
+ `_.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param request_payload: The run command request. Required.
+ :type request_payload: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of AsyncLROPoller that returns either RunCommandResult or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.RunCommandResult]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ # Typing overload (binary payload) — no runtime body; see the implementation below.
+
+ @distributed_trace_async
+ async def begin_run_command(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ request_payload: Union[_models.RunCommandRequest, IO[bytes]],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.RunCommandResult]:
+ """Submits a command to run against the Managed Cluster.
+
+ AKS will create a pod to run the command. This is primarily useful for private clusters. For
+ more information see `AKS Run Command
+ `_.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param request_payload: The run command request. Is either a RunCommandRequest type or a
+ IO[bytes] type. Required.
+ :type request_payload: ~azure.mgmt.containerservice.models.RunCommandRequest or IO[bytes]
+ :return: An instance of AsyncLROPoller that returns either RunCommandResult or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.RunCommandResult]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.RunCommandResult] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ # Only issue the initial request when not resuming from a continuation token.
+ if cont_token is None:
+ raw_result = await self._run_command_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ request_payload=request_payload,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ # Deserialize the terminal response into a RunCommandResult model.
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("RunCommandResult", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ # polling=True -> ARM polling via the Location header; False -> no polling;
+ # anything else is treated as a caller-supplied polling method.
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(
+ AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[_models.RunCommandResult].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[_models.RunCommandResult](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
+
+ @distributed_trace_async
+ async def get_command_result(
+ self, resource_group_name: str, resource_name: str, command_id: str, **kwargs: Any
+ ) -> Optional[_models.RunCommandResult]:
+ """Gets the results of a command which has been run on the Managed Cluster.
+
+ Gets the results of a command which has been run on the Managed Cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param command_id: Id of the command. Required.
+ :type command_id: str
+ :return: RunCommandResult or None or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.RunCommandResult or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[Optional[_models.RunCommandResult]] = kwargs.pop("cls", None)
+
+ _request = build_get_command_result_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ command_id=command_id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ # Plain GET; the full body is read before deserialization (no streaming).
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ # 200 -> the command finished and a result body is available;
+ # 202 -> still running, only the Location polling header is returned (result stays None).
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("RunCommandResult", pipeline_response.http_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
    @distributed_trace
    def list_outbound_network_dependencies_endpoints(  # pylint: disable=name-too-long
        self, resource_group_name: str, resource_name: str, **kwargs: Any
    ) -> AsyncItemPaged["_models.OutboundEnvironmentEndpoint"]:
        """Gets a list of egress endpoints (network endpoints of all outbound dependencies) in the
        specified managed cluster.

        Gets a list of egress endpoints (network endpoints of all outbound dependencies) in the
        specified managed cluster. The operation returns properties of each egress endpoint.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :return: An iterator like instance of either OutboundEnvironmentEndpoint or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.OutboundEnvironmentEndpoint]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Per-call api-version override falls back to the client configuration.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.OutboundEnvironmentEndpointCollection] = kwargs.pop("cls", None)

        # Map well-known status codes to typed azure-core exceptions; callers may
        # extend or override the mapping via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the generated request builder; subsequent pages
            # replay the service-supplied nextLink, re-encoding its query string
            # and pinning the client's api-version.
            if not next_link:

                _request = build_list_outbound_network_dependencies_endpoints_request(
                    resource_group_name=resource_group_name,
                    resource_name=resource_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    headers=_headers,
                    params=_params,
                )
                _request.url = self._client.format_url(_request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                _request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                _request.url = self._client.format_url(_request.url)
                _request.method = "GET"
            return _request

        async def extract_data(pipeline_response):
            # Turn one page into (continuation token, async iterable of items).
            deserialized = self._deserialize("OutboundEnvironmentEndpointCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch a single page, surfacing ARM errors as HttpResponseError.
            _request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                _request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(
                    _models.ErrorResponse,
                    pipeline_response,
                )
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)
+
    @distributed_trace
    def list_mesh_revision_profiles(
        self, location: str, **kwargs: Any
    ) -> AsyncItemPaged["_models.MeshRevisionProfile"]:
        """Lists mesh revision profiles for all meshes in the specified location.

        Contains extra metadata on each revision, including supported revisions, cluster compatibility
        and available upgrades.

        :param location: The name of the Azure region. Required.
        :type location: str
        :return: An iterator like instance of either MeshRevisionProfile or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.MeshRevisionProfile]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Per-call api-version override falls back to the client configuration.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.MeshRevisionProfileList] = kwargs.pop("cls", None)

        # Map well-known status codes to typed azure-core exceptions; callers may
        # extend or override the mapping via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the generated request builder; subsequent pages
            # replay the service-supplied nextLink with the client's api-version.
            if not next_link:

                _request = build_list_mesh_revision_profiles_request(
                    location=location,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    headers=_headers,
                    params=_params,
                )
                _request.url = self._client.format_url(_request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                _request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                _request.url = self._client.format_url(_request.url)
                _request.method = "GET"
            return _request

        async def extract_data(pipeline_response):
            # Turn one page into (continuation token, async iterable of items).
            deserialized = self._deserialize("MeshRevisionProfileList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch a single page, surfacing ARM errors as HttpResponseError.
            _request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                _request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(
                    _models.ErrorResponse,
                    pipeline_response,
                )
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)
+
    @distributed_trace_async
    async def get_mesh_revision_profile(self, location: str, mode: str, **kwargs: Any) -> _models.MeshRevisionProfile:
        """Gets a mesh revision profile for a specified mesh in the specified location.

        Contains extra metadata on the revision, including supported revisions, cluster compatibility
        and available upgrades.

        :param location: The name of the Azure region. Required.
        :type location: str
        :param mode: The mode of the mesh. Required.
        :type mode: str
        :return: MeshRevisionProfile or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.MeshRevisionProfile
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known status codes to typed azure-core exceptions; callers may
        # extend or override the mapping via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Per-call api-version override falls back to the client configuration.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.MeshRevisionProfile] = kwargs.pop("cls", None)

        _request = build_get_mesh_revision_profile_request(
            location=location,
            mode=mode,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _stream = False  # buffer the body so it can be deserialized immediately below
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("MeshRevisionProfile", pipeline_response.http_response)

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore
+
    @distributed_trace
    def list_mesh_upgrade_profiles(
        self, resource_group_name: str, resource_name: str, **kwargs: Any
    ) -> AsyncItemPaged["_models.MeshUpgradeProfile"]:
        """Lists available upgrades for all service meshes in a specific cluster.

        Lists available upgrades for all service meshes in a specific cluster.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :return: An iterator like instance of either MeshUpgradeProfile or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.MeshUpgradeProfile]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Per-call api-version override falls back to the client configuration.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.MeshUpgradeProfileList] = kwargs.pop("cls", None)

        # Map well-known status codes to typed azure-core exceptions; callers may
        # extend or override the mapping via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the generated request builder; subsequent pages
            # replay the service-supplied nextLink with the client's api-version.
            if not next_link:

                _request = build_list_mesh_upgrade_profiles_request(
                    resource_group_name=resource_group_name,
                    resource_name=resource_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    headers=_headers,
                    params=_params,
                )
                _request.url = self._client.format_url(_request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                _request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                _request.url = self._client.format_url(_request.url)
                _request.method = "GET"
            return _request

        async def extract_data(pipeline_response):
            # Turn one page into (continuation token, async iterable of items).
            deserialized = self._deserialize("MeshUpgradeProfileList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch a single page, surfacing ARM errors as HttpResponseError.
            _request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                _request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(
                    _models.ErrorResponse,
                    pipeline_response,
                )
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)
+
    @distributed_trace_async
    async def get_mesh_upgrade_profile(
        self, resource_group_name: str, resource_name: str, mode: str, **kwargs: Any
    ) -> _models.MeshUpgradeProfile:
        """Gets available upgrades for a service mesh in a cluster.

        Gets available upgrades for a service mesh in a cluster.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param mode: The mode of the mesh. Required.
        :type mode: str
        :return: MeshUpgradeProfile or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.MeshUpgradeProfile
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known status codes to typed azure-core exceptions; callers may
        # extend or override the mapping via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Per-call api-version override falls back to the client configuration.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.MeshUpgradeProfile] = kwargs.pop("cls", None)

        _request = build_get_mesh_upgrade_profile_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            mode=mode,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _stream = False  # buffer the body so it can be deserialized immediately below
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("MeshUpgradeProfile", pipeline_response.http_response)

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_managed_namespaces_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_managed_namespaces_operations.py
new file mode 100644
index 00000000000..ecd1869246e
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_managed_namespaces_operations.py
@@ -0,0 +1,809 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from io import IOBase
+from typing import Any, AsyncIterator, Callable, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core import AsyncPipelineClient
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ..._utils.serialization import Deserializer, Serializer
+from ...operations._managed_namespaces_operations import (
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_request,
+ build_list_by_managed_cluster_request,
+ build_list_credential_request,
+ build_update_request,
+)
+from .._configuration import ContainerServiceClientConfiguration
+
T = TypeVar("T")
# Optional caller-supplied hook invoked as cls(pipeline_response, deserialized, response_headers)
# to customize what each operation returns.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]]
# NOTE(review): shadows typing.List with the builtin — presumably kept by the code
# generator for annotation compatibility; verify before removing.
List = list
+
+
+class ManagedNamespacesOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s
+ :attr:`managed_namespaces` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
    @distributed_trace
    def list_by_managed_cluster(
        self, resource_group_name: str, resource_name: str, **kwargs: Any
    ) -> AsyncItemPaged["_models.ManagedNamespace"]:
        """Gets a list of managed namespaces in the specified managed cluster.

        Gets a list of managed namespaces in the specified managed cluster.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :return: An iterator like instance of either ManagedNamespace or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.ManagedNamespace]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Per-call api-version override falls back to the client configuration.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.ManagedNamespaceListResult] = kwargs.pop("cls", None)

        # Map well-known status codes to typed azure-core exceptions; callers may
        # extend or override the mapping via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the generated request builder; subsequent pages
            # replay the service-supplied nextLink with the client's api-version.
            if not next_link:

                _request = build_list_by_managed_cluster_request(
                    resource_group_name=resource_group_name,
                    resource_name=resource_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    headers=_headers,
                    params=_params,
                )
                _request.url = self._client.format_url(_request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                _request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                _request.url = self._client.format_url(_request.url)
                _request.method = "GET"
            return _request

        async def extract_data(pipeline_response):
            # Turn one page into (continuation token, async iterable of items).
            deserialized = self._deserialize("ManagedNamespaceListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch a single page, surfacing ARM errors as HttpResponseError.
            _request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                _request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(
                    _models.ErrorResponse,
                    pipeline_response,
                )
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)
+
    @distributed_trace_async
    async def get(
        self, resource_group_name: str, resource_name: str, managed_namespace_name: str, **kwargs: Any
    ) -> _models.ManagedNamespace:
        """Gets the specified namespace of a managed cluster.

        Gets the specified namespace of a managed cluster.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param managed_namespace_name: The name of the managed namespace. Required.
        :type managed_namespace_name: str
        :return: ManagedNamespace or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.ManagedNamespace
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known status codes to typed azure-core exceptions; callers may
        # extend or override the mapping via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Per-call api-version override falls back to the client configuration.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.ManagedNamespace] = kwargs.pop("cls", None)

        _request = build_get_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            managed_namespace_name=managed_namespace_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _stream = False  # buffer the body so it can be deserialized immediately below
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("ManagedNamespace", pipeline_response.http_response)

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore
+
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        managed_namespace_name: str,
        parameters: Union[_models.ManagedNamespace, IO[bytes]],
        **kwargs: Any
    ) -> AsyncIterator[bytes]:
        """Send the initial PUT for ``begin_create_or_update`` and return the raw
        streamed response body for the LRO poller to consume.
        """
        # Map well-known status codes to typed azure-core exceptions; callers may
        # extend or override the mapping via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        # The body may be given as a model (serialized to JSON) or as raw bytes/stream.
        _json = None
        _content = None
        if isinstance(parameters, (IOBase, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "ManagedNamespace")

        _request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            managed_namespace_name=managed_namespace_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _decompress = kwargs.pop("decompress", True)
        _stream = True  # keep the body streamed; the poller deserializes it later
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            try:
                await response.read()  # Load the body in memory and close the socket
            except (StreamConsumedError, StreamClosedError):
                pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # Surface the Azure-AsyncOperation polling URL to the LRO machinery.
        response_headers = {}
        response_headers["Azure-AsyncOperation"] = self._deserialize(
            "str", response.headers.get("Azure-AsyncOperation")
        )

        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)  # type: ignore

        return deserialized  # type: ignore
+
    @overload
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        managed_namespace_name: str,
        parameters: _models.ManagedNamespace,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.ManagedNamespace]:
        """Creates or updates a namespace managed by ARM for the specified managed cluster. Users can
        configure aspects like resource quotas, network ingress/egress policies, and more. See
        aka.ms/aks/managed-namespaces for more details.

        Creates or updates a namespace managed by ARM for the specified managed cluster. Users can
        configure aspects like resource quotas, network ingress/egress policies, and more. See
        aka.ms/aks/managed-namespaces for more details.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param managed_namespace_name: The name of the managed namespace. Required.
        :type managed_namespace_name: str
        :param parameters: The namespace to create or update. Required.
        :type parameters: ~azure.mgmt.containerservice.models.ManagedNamespace
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns either ManagedNamespace or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedNamespace]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typing-only overload (model body); the runtime implementation is the
        # non-overloaded begin_create_or_update defined later in this class.
+
    @overload
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        managed_namespace_name: str,
        parameters: IO[bytes],
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.ManagedNamespace]:
        """Creates or updates a namespace managed by ARM for the specified managed cluster. Users can
        configure aspects like resource quotas, network ingress/egress policies, and more. See
        aka.ms/aks/managed-namespaces for more details.

        Creates or updates a namespace managed by ARM for the specified managed cluster. Users can
        configure aspects like resource quotas, network ingress/egress policies, and more. See
        aka.ms/aks/managed-namespaces for more details.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param managed_namespace_name: The name of the managed namespace. Required.
        :type managed_namespace_name: str
        :param parameters: The namespace to create or update. Required.
        :type parameters: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns either ManagedNamespace or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedNamespace]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typing-only overload (raw-bytes body); the runtime implementation is the
        # non-overloaded begin_create_or_update defined later in this class.
+
    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        managed_namespace_name: str,
        parameters: Union[_models.ManagedNamespace, IO[bytes]],
        **kwargs: Any
    ) -> AsyncLROPoller[_models.ManagedNamespace]:
        """Creates or updates a namespace managed by ARM for the specified managed cluster. Users can
        configure aspects like resource quotas, network ingress/egress policies, and more. See
        aka.ms/aks/managed-namespaces for more details.

        Creates or updates a namespace managed by ARM for the specified managed cluster. Users can
        configure aspects like resource quotas, network ingress/egress policies, and more. See
        aka.ms/aks/managed-namespaces for more details.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param managed_namespace_name: The name of the managed namespace. Required.
        :type managed_namespace_name: str
        :param parameters: The namespace to create or update. Is either a ManagedNamespace type or a
         IO[bytes] type. Required.
        :type parameters: ~azure.mgmt.containerservice.models.ManagedNamespace or IO[bytes]
        :return: An instance of AsyncLROPoller that returns either ManagedNamespace or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.ManagedNamespace]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.ManagedNamespace] = kwargs.pop("cls", None)
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        # A continuation token lets a caller rehydrate a poller for an operation
        # that is already in flight, skipping the initial request.
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                managed_namespace_name=managed_namespace_name,
                parameters=parameters,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,  # keep the raw PipelineResponse for the poller
                headers=_headers,
                params=_params,
                **kwargs
            )
            # Fully read the streamed body before polling starts.
            await raw_result.http_response.read()  # type: ignore
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response once the LRO completes.
            response_headers = {}
            response = pipeline_response.http_response
            response_headers["Azure-AsyncOperation"] = self._deserialize(
                "str", response.headers.get("Azure-AsyncOperation")
            )

            deserialized = self._deserialize("ManagedNamespace", pipeline_response.http_response)
            if cls:
                return cls(pipeline_response, deserialized, response_headers)  # type: ignore
            return deserialized

        # polling=True -> standard ARM polling; False -> no polling; otherwise a
        # caller-supplied AsyncPollingMethod.
        if polling is True:
            polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller[_models.ManagedNamespace].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller[_models.ManagedNamespace](
            self._client, raw_result, get_long_running_output, polling_method  # type: ignore
        )
+
    async def _delete_initial(
        self, resource_group_name: str, resource_name: str, managed_namespace_name: str, **kwargs: Any
    ) -> AsyncIterator[bytes]:
        """Issue the initial DELETE call for the delete LRO.

        Internal helper for :meth:`begin_delete`. Returns the raw streamed response
        body; the polling headers are exposed to the poller via the ``cls`` callback.
        """
        # Typed exceptions for common failure codes; callers may extend via error_map.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)

        _request = build_delete_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            managed_namespace_name=managed_namespace_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        # Stream the response: the LRO machinery decides when/whether to read the body.
        _decompress = kwargs.pop("decompress", True)
        _stream = True
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        # DELETE accepts 202 (accepted, poll for completion) or 204 (already gone).
        if response.status_code not in [202, 204]:
            try:
                await response.read()  # Load the body in memory and close the socket
            except (StreamConsumedError, StreamClosedError):
                pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # Surface the polling headers (Location / Azure-AsyncOperation) to the LRO poller.
        response_headers = {}
        if response.status_code == 202:
            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
            response_headers["Azure-AsyncOperation"] = self._deserialize(
                "str", response.headers.get("Azure-AsyncOperation")
            )

        if response.status_code == 204:
            response_headers["Azure-AsyncOperation"] = self._deserialize(
                "str", response.headers.get("Azure-AsyncOperation")
            )

        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)  # type: ignore

        return deserialized  # type: ignore
+
    @distributed_trace_async
    async def begin_delete(
        self, resource_group_name: str, resource_name: str, managed_namespace_name: str, **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes a namespace.

        Deletes a namespace.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param managed_namespace_name: The name of the managed namespace. Required.
        :type managed_namespace_name: str
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            # Initial DELETE; cls=lambda returns the raw PipelineResponse for the poller.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                managed_namespace_name=managed_namespace_name,
                api_version=api_version,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
            # Drain the streamed body so the connection is released before polling starts.
            await raw_result.http_response.read()  # type: ignore
        # error_map was consumed by the initial call; don't forward it to the poller.
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # DELETE has no body to deserialize; only invoke the user callback if provided.
            if cls:
                return cls(pipeline_response, None, {})  # type: ignore

        # polling=True -> standard ARM polling; False -> no polling; otherwise a custom method.
        if polling is True:
            polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            # Rehydrate an existing poller from a continuation token (no initial call was made).
            return AsyncLROPoller[None].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
+
    @overload
    async def update(
        self,
        resource_group_name: str,
        resource_name: str,
        managed_namespace_name: str,
        parameters: _models.TagsObject,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.ManagedNamespace:
        """Updates tags on a managed namespace.

        Overload accepting the tags as a ``TagsObject`` model; the model is serialized
        to JSON before being sent.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param managed_namespace_name: The name of the managed namespace. Required.
        :type managed_namespace_name: str
        :param parameters: Parameters supplied to the patch namespace operation; only patching
         tags is supported for now. Required.
        :type parameters: ~azure.mgmt.containerservice.models.TagsObject
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: ManagedNamespace or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.ManagedNamespace
        :raises ~azure.core.exceptions.HttpResponseError:
        """
+
    @overload
    async def update(
        self,
        resource_group_name: str,
        resource_name: str,
        managed_namespace_name: str,
        parameters: IO[bytes],
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.ManagedNamespace:
        """Updates tags on a managed namespace.

        Overload accepting the request body as a pre-serialized byte stream, which is
        sent as-is.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param managed_namespace_name: The name of the managed namespace. Required.
        :type managed_namespace_name: str
        :param parameters: Parameters supplied to the patch namespace operation; only patching
         tags is supported for now. Required.
        :type parameters: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: ManagedNamespace or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.ManagedNamespace
        :raises ~azure.core.exceptions.HttpResponseError:
        """
+
    @distributed_trace_async
    async def update(
        self,
        resource_group_name: str,
        resource_name: str,
        managed_namespace_name: str,
        parameters: Union[_models.TagsObject, IO[bytes]],
        **kwargs: Any
    ) -> _models.ManagedNamespace:
        """Updates tags on a managed namespace.

        Updates tags on a managed namespace.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param managed_namespace_name: The name of the managed namespace. Required.
        :type managed_namespace_name: str
        :param parameters: Parameters supplied to the patch namespace operation, we only support patch
         tags for now. Is either a TagsObject type or a IO[bytes] type. Required.
        :type parameters: ~azure.mgmt.containerservice.models.TagsObject or IO[bytes]
        :return: ManagedNamespace or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.ManagedNamespace
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typed exceptions for common failure codes; callers may extend via error_map.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.ManagedNamespace] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Byte streams are sent as-is; model objects are serialized to JSON first.
        if isinstance(parameters, (IOBase, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "TagsObject")

        _request = build_update_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            managed_namespace_name=managed_namespace_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("ManagedNamespace", pipeline_response.http_response)

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore
+
+ @distributed_trace_async
+ async def list_credential(
+ self, resource_group_name: str, resource_name: str, managed_namespace_name: str, **kwargs: Any
+ ) -> _models.CredentialResults:
+ """Lists the credentials of a namespace.
+
+ Lists the credentials of a namespace.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param managed_namespace_name: The name of the managed namespace. Required.
+ :type managed_namespace_name: str
+ :return: CredentialResults or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.CredentialResults
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None)
+
+ _request = build_list_credential_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ managed_namespace_name=managed_namespace_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("CredentialResults", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_operations.py
new file mode 100644
index 00000000000..6277992c37d
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_operations.py
@@ -0,0 +1,135 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from typing import Any, Callable, Optional, TypeVar
+import urllib.parse
+
+from azure.core import AsyncPipelineClient
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from ... import models as _models
+from ..._utils.serialization import Deserializer, Serializer
+from ...operations._operations import build_list_request
+from .._configuration import ContainerServiceClientConfiguration
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

    Instead, you should access the following operations through
    :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s
    :attr:`operations` attribute.
    """

    # Expose the models module so operation models are reachable from the class.
    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Generated client plumbing: accept (client, config, serializer, deserializer)
        # either positionally or as keywords, in that fixed order.
        input_args = list(args)
        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(self, **kwargs: Any) -> AsyncItemPaged["_models.OperationValue"]:
        """Gets a list of operations.

        Gets a list of operations.

        :return: An iterator like instance of either OperationValue or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.OperationValue]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)

        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the canonical list request; subsequent pages: rebuild
            # the request from the server-provided next link.
            if not next_link:

                _request = build_list_request(
                    api_version=api_version,
                    headers=_headers,
                    params=_params,
                )
                _request.url = self._client.format_url(_request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                _request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                _request.url = self._client.format_url(_request.url)
                _request.method = "GET"
            return _request

        async def extract_data(pipeline_response):
            # Deserialize one page into its element list. The first tuple member is the
            # continuation token; None here means no further pages are requested.
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page and fail fast on any non-200 status.
            _request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                _request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(
                    _models.ErrorResponse,
                    pipeline_response,
                )
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_patch.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_patch.py
new file mode 100644
index 00000000000..8bcb627aa47
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_patch.py
@@ -0,0 +1,21 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
+
+
def patch_sdk():
    """Last-resort hook for customizing the generated code.

    Do not remove this function even though it is a no-op: tooling may look it
    up by name. Prefer the customization techniques described at
    https://aka.ms/azsdk/python/dpcodegen/python/customize before resorting to it.
    """
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_private_endpoint_connections_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_private_endpoint_connections_operations.py
new file mode 100644
index 00000000000..832f404c47e
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_private_endpoint_connections_operations.py
@@ -0,0 +1,459 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from io import IOBase
+from typing import Any, AsyncIterator, Callable, IO, Optional, TypeVar, Union, cast, overload
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ..._utils.serialization import Deserializer, Serializer
+from ...operations._private_endpoint_connections_operations import (
+ build_delete_request,
+ build_get_request,
+ build_list_request,
+ build_update_request,
+)
+from .._configuration import ContainerServiceClientConfiguration
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+
+class PrivateEndpointConnectionsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s
+ :attr:`private_endpoint_connections` attribute.
+ """
+
+ models = _models
+
    def __init__(self, *args, **kwargs) -> None:
        # Generated client plumbing: accept (client, config, serializer, deserializer)
        # either positionally or as keywords, in that fixed order.
        input_args = list(args)
        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
    @distributed_trace_async
    async def list(
        self, resource_group_name: str, resource_name: str, **kwargs: Any
    ) -> _models.PrivateEndpointConnectionListResult:
        """Gets a list of private endpoint connections in the specified managed cluster.

        To learn more about private clusters, see:
        https://docs.microsoft.com/azure/aks/private-clusters.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :return: PrivateEndpointConnectionListResult or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnectionListResult
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typed exceptions for common failure codes; callers may extend via error_map.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.PrivateEndpointConnectionListResult] = kwargs.pop("cls", None)

        _request = build_list_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        # Single (non-paged) response: the full connection list arrives in one payload.
        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response.http_response)

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore
+
+ @distributed_trace_async
+ async def get(
+ self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
+ ) -> _models.PrivateEndpointConnection:
+ """Gets the specified private endpoint connection.
+
+ To learn more about private clusters, see:
+ https://docs.microsoft.com/azure/aks/private-clusters.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param private_endpoint_connection_name: The name of the private endpoint connection. Required.
+ :type private_endpoint_connection_name: str
+ :return: PrivateEndpointConnection or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnection
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ private_endpoint_connection_name=private_endpoint_connection_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
    @overload
    async def update(
        self,
        resource_group_name: str,
        resource_name: str,
        private_endpoint_connection_name: str,
        parameters: _models.PrivateEndpointConnection,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.PrivateEndpointConnection:
        """Updates a private endpoint connection.

        Overload accepting the connection as a ``PrivateEndpointConnection`` model; the
        model is serialized to JSON before being sent.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection. Required.
        :type private_endpoint_connection_name: str
        :param parameters: The updated private endpoint connection. Required.
        :type parameters: ~azure.mgmt.containerservice.models.PrivateEndpointConnection
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: PrivateEndpointConnection or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnection
        :raises ~azure.core.exceptions.HttpResponseError:
        """
+
    @overload
    async def update(
        self,
        resource_group_name: str,
        resource_name: str,
        private_endpoint_connection_name: str,
        parameters: IO[bytes],
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.PrivateEndpointConnection:
        """Updates a private endpoint connection.

        Overload accepting the request body as a pre-serialized byte stream, which is
        sent as-is.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection. Required.
        :type private_endpoint_connection_name: str
        :param parameters: The updated private endpoint connection. Required.
        :type parameters: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: PrivateEndpointConnection or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnection
        :raises ~azure.core.exceptions.HttpResponseError:
        """
+
+ @distributed_trace_async
+ async def update(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ private_endpoint_connection_name: str,
+ parameters: Union[_models.PrivateEndpointConnection, IO[bytes]],
+ **kwargs: Any
+ ) -> _models.PrivateEndpointConnection:
+ """Updates a private endpoint connection.
+
+ Updates a private endpoint connection.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param private_endpoint_connection_name: The name of the private endpoint connection. Required.
+ :type private_endpoint_connection_name: str
+ :param parameters: The updated private endpoint connection. Is either a
+ PrivateEndpointConnection type or an IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.PrivateEndpointConnection or IO[bytes]
+ :return: PrivateEndpointConnection or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnection
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(parameters, (IOBase, bytes)):
+ _content = parameters
+ else:
+ _json = self._serialize.body(parameters, "PrivateEndpointConnection")
+
+ _request = build_update_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ private_endpoint_connection_name=private_endpoint_connection_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ async def _delete_initial(
+ self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
+ ) -> AsyncIterator[bytes]:
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ private_endpoint_connection_name=private_endpoint_connection_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ _stream = True
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 204]:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def begin_delete(
+ self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
+ ) -> AsyncLROPoller[None]:
+ """Deletes a private endpoint connection.
+
+ Deletes a private endpoint connection.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param private_endpoint_connection_name: The name of the private endpoint connection. Required.
+ :type private_endpoint_connection_name: str
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = await self._delete_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ private_endpoint_connection_name=private_endpoint_connection_name,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ await raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return AsyncLROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_private_link_resources_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_private_link_resources_operations.py
new file mode 100644
index 00000000000..75359f8ffed
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_private_link_resources_operations.py
@@ -0,0 +1,117 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from typing import Any, Callable, Optional, TypeVar
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from ... import models as _models
+from ..._utils.serialization import Deserializer, Serializer
+from ...operations._private_link_resources_operations import build_list_request
+from .._configuration import ContainerServiceClientConfiguration
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+
+class PrivateLinkResourcesOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s
+ :attr:`private_link_resources` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace_async
+ async def list(
+ self, resource_group_name: str, resource_name: str, **kwargs: Any
+ ) -> _models.PrivateLinkResourcesListResult:
+ """Gets a list of private link resources in the specified managed cluster.
+
+ To learn more about private clusters, see:
+ https://docs.microsoft.com/azure/aks/private-clusters.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :return: PrivateLinkResourcesListResult or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.PrivateLinkResourcesListResult
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.PrivateLinkResourcesListResult] = kwargs.pop("cls", None)
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("PrivateLinkResourcesListResult", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_resolve_private_link_service_id_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_resolve_private_link_service_id_operations.py
new file mode 100644
index 00000000000..e173bbbc3ef
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_resolve_private_link_service_id_operations.py
@@ -0,0 +1,194 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from io import IOBase
+from typing import Any, Callable, IO, Optional, TypeVar, Union, overload
+
+from azure.core import AsyncPipelineClient
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from ... import models as _models
+from ..._utils.serialization import Deserializer, Serializer
+from ...operations._resolve_private_link_service_id_operations import build_post_request
+from .._configuration import ContainerServiceClientConfiguration
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+
+class ResolvePrivateLinkServiceIdOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s
+ :attr:`resolve_private_link_service_id` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @overload
+ async def post(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: _models.PrivateLinkResource,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.PrivateLinkResource:
+ """Gets the private link service ID for the specified managed cluster.
+
+ Gets the private link service ID for the specified managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: Parameters required in order to resolve a private link service ID. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.PrivateLinkResource
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: PrivateLinkResource or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.PrivateLinkResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def post(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.PrivateLinkResource:
+ """Gets the private link service ID for the specified managed cluster.
+
+ Gets the private link service ID for the specified managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: Parameters required in order to resolve a private link service ID. Required.
+ :type parameters: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: PrivateLinkResource or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.PrivateLinkResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def post(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.PrivateLinkResource, IO[bytes]],
+ **kwargs: Any
+ ) -> _models.PrivateLinkResource:
+ """Gets the private link service ID for the specified managed cluster.
+
+ Gets the private link service ID for the specified managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: Parameters required in order to resolve a private link service ID. Is either
+ a PrivateLinkResource type or an IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.PrivateLinkResource or IO[bytes]
+ :return: PrivateLinkResource or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.PrivateLinkResource
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.PrivateLinkResource] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(parameters, (IOBase, bytes)):
+ _content = parameters
+ else:
+ _json = self._serialize.body(parameters, "PrivateLinkResource")
+
+ _request = build_post_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("PrivateLinkResource", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_snapshots_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_snapshots_operations.py
new file mode 100644
index 00000000000..a09e86da1a9
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_snapshots_operations.py
@@ -0,0 +1,626 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from io import IOBase
+from typing import Any, Callable, IO, Optional, TypeVar, Union, overload
+import urllib.parse
+
+from azure.core import AsyncPipelineClient
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from ... import models as _models
+from ..._utils.serialization import Deserializer, Serializer
+from ...operations._snapshots_operations import (
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_request,
+ build_list_by_resource_group_request,
+ build_list_request,
+ build_update_tags_request,
+)
+from .._configuration import ContainerServiceClientConfiguration
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+
+class SnapshotsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s
+ :attr:`snapshots` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(self, **kwargs: Any) -> AsyncItemPaged["_models.Snapshot"]:
+ """Gets a list of snapshots in the specified subscription.
+
+ Gets a list of snapshots in the specified subscription.
+
+ :return: An iterator like instance of either Snapshot or the result of cls(response)
+ :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.Snapshot]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.SnapshotListResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("SnapshotListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ @distributed_trace
+ def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> AsyncItemPaged["_models.Snapshot"]:
+ """Lists snapshots in the specified subscription and resource group.
+
+ Lists snapshots in the specified subscription and resource group.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :return: An iterator like instance of either Snapshot or the result of cls(response)
+ :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.Snapshot]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.SnapshotListResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_by_resource_group_request(
+ resource_group_name=resource_group_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("SnapshotListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
+
+ @distributed_trace_async
+ async def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _models.Snapshot:
+ """Gets a snapshot.
+
+ Gets a snapshot.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :return: Snapshot or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.Snapshot
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("Snapshot", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
    @overload
    async def create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: _models.Snapshot,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Snapshot:
        # Typing-only overload (no runtime body): accepts the Snapshot model form
        # of the request body. The @distributed_trace_async implementation below
        # handles both this form and the raw IO[bytes] form.
        """Creates or updates a snapshot.

        Creates or updates a snapshot.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param parameters: The snapshot to create or update. Required.
        :type parameters: ~azure.mgmt.containerservice.models.Snapshot
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: Snapshot or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.Snapshot
        :raises ~azure.core.exceptions.HttpResponseError:
        """
+
    @overload
    async def create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: IO[bytes],
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Snapshot:
        # Typing-only overload (no runtime body): accepts a pre-serialized binary
        # stream as the request body. The implementation below passes it through
        # unmodified instead of serializing a model.
        """Creates or updates a snapshot.

        Creates or updates a snapshot.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param parameters: The snapshot to create or update. Required.
        :type parameters: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: Snapshot or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.Snapshot
        :raises ~azure.core.exceptions.HttpResponseError:
        """
+
    @distributed_trace_async
    async def create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: Union[_models.Snapshot, IO[bytes]],
        **kwargs: Any
    ) -> _models.Snapshot:
        """Creates or updates a snapshot.

        Creates or updates a snapshot.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param parameters: The snapshot to create or update. Is either a Snapshot type or a IO[bytes]
         type. Required.
        :type parameters: ~azure.mgmt.containerservice.models.Snapshot or IO[bytes]
        :return: Snapshot or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.Snapshot
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP status codes to typed azure-core exceptions;
        # callers may extend/override this mapping via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Precedence for api-version: explicit kwarg > query param > client config.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Streams/bytes are sent as-is; model objects are serialized to JSON.
        if isinstance(parameters, (IOBase, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "Snapshot")

        _request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        # 200 = updated, 201 = created; anything else raises with the ARM error body.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("Snapshot", pipeline_response.http_response)

        # If the caller supplied a custom response hook (cls), let it shape the return value.
        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore
+
    @overload
    async def update_tags(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: _models.TagsObject,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Snapshot:
        # Typing-only overload (no runtime body): accepts the TagsObject model
        # form of the request body; the implementation below serializes it.
        """Updates tags on a snapshot.

        Updates tags on a snapshot.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param parameters: Parameters supplied to the Update snapshot Tags operation. Required.
        :type parameters: ~azure.mgmt.containerservice.models.TagsObject
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: Snapshot or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.Snapshot
        :raises ~azure.core.exceptions.HttpResponseError:
        """
+
    @overload
    async def update_tags(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: IO[bytes],
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Snapshot:
        # Typing-only overload (no runtime body): accepts a pre-serialized binary
        # stream; the implementation below sends it through unmodified.
        """Updates tags on a snapshot.

        Updates tags on a snapshot.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param parameters: Parameters supplied to the Update snapshot Tags operation. Required.
        :type parameters: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: Snapshot or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.Snapshot
        :raises ~azure.core.exceptions.HttpResponseError:
        """
+
    @distributed_trace_async
    async def update_tags(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: Union[_models.TagsObject, IO[bytes]],
        **kwargs: Any
    ) -> _models.Snapshot:
        """Updates tags on a snapshot.

        Updates tags on a snapshot.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param parameters: Parameters supplied to the Update snapshot Tags operation. Is either a
         TagsObject type or a IO[bytes] type. Required.
        :type parameters: ~azure.mgmt.containerservice.models.TagsObject or IO[bytes]
        :return: Snapshot or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.Snapshot
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP status codes to typed azure-core exceptions;
        # callers may extend/override this mapping via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Precedence for api-version: explicit kwarg > query param > client config.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Streams/bytes are sent as-is; model objects are serialized to JSON.
        if isinstance(parameters, (IOBase, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "TagsObject")

        _request = build_update_tags_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        # Only 200 is a success for a tags PATCH; anything else raises with the ARM error body.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("Snapshot", pipeline_response.http_response)

        # If the caller supplied a custom response hook (cls), let it shape the return value.
        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore
+
    @distributed_trace_async
    async def delete(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> None:
        """Deletes a snapshot.

        Deletes a snapshot.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP status codes to typed azure-core exceptions;
        # callers may extend/override this mapping via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Precedence for api-version: explicit kwarg > query param > client config.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[None] = kwargs.pop("cls", None)

        _request = build_delete_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        # 200 = deleted, 204 = no content (already gone); anything else raises.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # Delete returns no body; the cls hook is the only way to observe the response.
        if cls:
            return cls(pipeline_response, None, {})  # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_trusted_access_role_bindings_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_trusted_access_role_bindings_operations.py
new file mode 100644
index 00000000000..07c07d19d19
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_trusted_access_role_bindings_operations.py
@@ -0,0 +1,560 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from io import IOBase
+from typing import Any, AsyncIterator, Callable, IO, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core import AsyncPipelineClient
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models as _models
+from ..._utils.serialization import Deserializer, Serializer
+from ...operations._trusted_access_role_bindings_operations import (
+ build_create_or_update_request,
+ build_delete_request,
+ build_get_request,
+ build_list_request,
+)
+from .._configuration import ContainerServiceClientConfiguration
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+
class TrustedAccessRoleBindingsOperations:
    """Async operations for AKS trusted access role bindings (list/get/create/delete).

    .. warning::
        **DO NOT** instantiate this class directly.

    Instead, you should access the following operations through
    :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s
    :attr:`trusted_access_role_bindings` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # The client wires in its pipeline, configuration, and (de)serializers
        # either positionally or by keyword; consume positionals first.
        input_args = list(args)
        self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(
        self, resource_group_name: str, resource_name: str, **kwargs: Any
    ) -> AsyncItemPaged["_models.TrustedAccessRoleBinding"]:
        """List trusted access role bindings.

        List trusted access role bindings.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :return: An iterator like instance of either TrustedAccessRoleBinding or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Precedence for api-version: explicit kwarg > query param > client config.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.TrustedAccessRoleBindingListResult] = kwargs.pop("cls", None)

        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the canonical list request. Subsequent pages:
            # follow the service-provided nextLink verbatim, re-applying the
            # client's api-version to its query string.
            if not next_link:

                _request = build_list_request(
                    resource_group_name=resource_group_name,
                    resource_name=resource_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    headers=_headers,
                    params=_params,
                )
                _request.url = self._client.format_url(_request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                _request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                _request.url = self._client.format_url(_request.url)
                _request.method = "GET"
            return _request

        async def extract_data(pipeline_response):
            # Turn one raw page response into (next_link, async-iterable of items).
            deserialized = self._deserialize("TrustedAccessRoleBindingListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page, raising a typed error for any non-200 response.
            _request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                _request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(
                    _models.ErrorResponse,
                    pipeline_response,
                )
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        # Pages are fetched lazily as the caller iterates.
        return AsyncItemPaged(get_next, extract_data)

    @distributed_trace_async
    async def get(
        self, resource_group_name: str, resource_name: str, trusted_access_role_binding_name: str, **kwargs: Any
    ) -> _models.TrustedAccessRoleBinding:
        """Get a trusted access role binding.

        Get a trusted access role binding.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param trusted_access_role_binding_name: The name of trusted access role binding. Required.
        :type trusted_access_role_binding_name: str
        :return: TrustedAccessRoleBinding or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.TrustedAccessRoleBinding
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP status codes to typed azure-core exceptions;
        # callers may extend/override this mapping via the "error_map" kwarg.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.TrustedAccessRoleBinding] = kwargs.pop("cls", None)

        _request = build_get_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            trusted_access_role_binding_name=trusted_access_role_binding_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("TrustedAccessRoleBinding", pipeline_response.http_response)

        # If the caller supplied a custom response hook (cls), let it shape the return value.
        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        trusted_access_role_binding_name: str,
        trusted_access_role_binding: Union[_models.TrustedAccessRoleBinding, IO[bytes]],
        **kwargs: Any
    ) -> AsyncIterator[bytes]:
        # Initial PUT for the LRO: sends the request and returns the raw
        # streamed response body; begin_create_or_update drives the polling.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Streams/bytes are sent as-is; model objects are serialized to JSON.
        if isinstance(trusted_access_role_binding, (IOBase, bytes)):
            _content = trusted_access_role_binding
        else:
            _json = self._serialize.body(trusted_access_role_binding, "TrustedAccessRoleBinding")

        _request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            trusted_access_role_binding_name=trusted_access_role_binding_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _decompress = kwargs.pop("decompress", True)
        # Stream the body so the poller can read it later without buffering here.
        _stream = True
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            try:
                await response.read()  # Load the body in memory and close the socket
            except (StreamConsumedError, StreamClosedError):
                pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @overload
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        trusted_access_role_binding_name: str,
        trusted_access_role_binding: _models.TrustedAccessRoleBinding,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.TrustedAccessRoleBinding]:
        # Typing-only overload (no runtime body): model form of the request body.
        """Create or update a trusted access role binding.

        Create or update a trusted access role binding.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param trusted_access_role_binding_name: The name of trusted access role binding. Required.
        :type trusted_access_role_binding_name: str
        :param trusted_access_role_binding: A trusted access role binding. Required.
        :type trusted_access_role_binding: ~azure.mgmt.containerservice.models.TrustedAccessRoleBinding
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns either TrustedAccessRoleBinding or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        trusted_access_role_binding_name: str,
        trusted_access_role_binding: IO[bytes],
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.TrustedAccessRoleBinding]:
        # Typing-only overload (no runtime body): pre-serialized binary request body.
        """Create or update a trusted access role binding.

        Create or update a trusted access role binding.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param trusted_access_role_binding_name: The name of trusted access role binding. Required.
        :type trusted_access_role_binding_name: str
        :param trusted_access_role_binding: A trusted access role binding. Required.
        :type trusted_access_role_binding: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of AsyncLROPoller that returns either TrustedAccessRoleBinding or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        trusted_access_role_binding_name: str,
        trusted_access_role_binding: Union[_models.TrustedAccessRoleBinding, IO[bytes]],
        **kwargs: Any
    ) -> AsyncLROPoller[_models.TrustedAccessRoleBinding]:
        """Create or update a trusted access role binding.

        Create or update a trusted access role binding.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param trusted_access_role_binding_name: The name of trusted access role binding. Required.
        :type trusted_access_role_binding_name: str
        :param trusted_access_role_binding: A trusted access role binding. Is either a
         TrustedAccessRoleBinding type or a IO[bytes] type. Required.
        :type trusted_access_role_binding: ~azure.mgmt.containerservice.models.TrustedAccessRoleBinding
         or IO[bytes]
        :return: An instance of AsyncLROPoller that returns either TrustedAccessRoleBinding or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.TrustedAccessRoleBinding] = kwargs.pop("cls", None)
        # polling: True -> ARM long-running-operation polling; False -> no polling;
        # otherwise a caller-supplied AsyncPollingMethod.
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # A continuation token means we are rehydrating an existing LRO and must
        # not issue the initial PUT again.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                trusted_access_role_binding_name=trusted_access_role_binding_name,
                trusted_access_role_binding=trusted_access_role_binding,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,  # capture the raw PipelineResponse for the poller
                headers=_headers,
                params=_params,
                **kwargs
            )
            await raw_result.http_response.read()  # type: ignore
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO into the model type.
            deserialized = self._deserialize("TrustedAccessRoleBinding", pipeline_response.http_response)
            if cls:
                return cls(pipeline_response, deserialized, {})  # type: ignore
            return deserialized

        if polling is True:
            polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller[_models.TrustedAccessRoleBinding].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller[_models.TrustedAccessRoleBinding](
            self._client, raw_result, get_long_running_output, polling_method  # type: ignore
        )

    async def _delete_initial(
        self, resource_group_name: str, resource_name: str, trusted_access_role_binding_name: str, **kwargs: Any
    ) -> AsyncIterator[bytes]:
        # Initial DELETE for the LRO: returns the raw streamed response body;
        # begin_delete drives the polling.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None)

        _request = build_delete_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            trusted_access_role_binding_name=trusted_access_role_binding_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _decompress = kwargs.pop("decompress", True)
        _stream = True
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        # 202 = deletion accepted (async), 204 = nothing to delete.
        if response.status_code not in [202, 204]:
            try:
                await response.read()  # Load the body in memory and close the socket
            except (StreamConsumedError, StreamClosedError):
                pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        response_headers = {}
        # On 202 the service returns a Location header the poller follows.
        if response.status_code == 202:
            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))

        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace_async
    async def begin_delete(
        self, resource_group_name: str, resource_name: str, trusted_access_role_binding_name: str, **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Delete a trusted access role binding.

        Delete a trusted access role binding.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param trusted_access_role_binding_name: The name of trusted access role binding. Required.
        :type trusted_access_role_binding_name: str
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # A continuation token means we are rehydrating an existing LRO and must
        # not issue the initial DELETE again.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                trusted_access_role_binding_name=trusted_access_role_binding_name,
                api_version=api_version,
                cls=lambda x, y, z: x,  # capture the raw PipelineResponse for the poller
                headers=_headers,
                params=_params,
                **kwargs
            )
            await raw_result.http_response.read()  # type: ignore
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # Delete yields no body; only the optional cls hook produces a value.
            if cls:
                return cls(pipeline_response, None, {})  # type: ignore

        if polling is True:
            polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller[None].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_trusted_access_roles_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_trusted_access_roles_operations.py
new file mode 100644
index 00000000000..cc473f5fdc0
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/aio/operations/_trusted_access_roles_operations.py
@@ -0,0 +1,139 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from typing import Any, Callable, Optional, TypeVar
+import urllib.parse
+
+from azure.core import AsyncPipelineClient
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from ... import models as _models
+from ..._utils.serialization import Deserializer, Serializer
+from ...operations._trusted_access_roles_operations import build_list_request
+from .._configuration import ContainerServiceClientConfiguration
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+
+class TrustedAccessRolesOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.containerservice.aio.ContainerServiceClient`'s
+ :attr:`trusted_access_roles` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(self, location: str, **kwargs: Any) -> AsyncItemPaged["_models.TrustedAccessRole"]:
+ """List supported trusted access roles.
+
+ List supported trusted access roles.
+
+ :param location: The name of the Azure region. Required.
+ :type location: str
+ :return: An iterator like instance of either TrustedAccessRole or the result of cls(response)
+ :rtype:
+ ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.models.TrustedAccessRole]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.TrustedAccessRoleListResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ location=location,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize("TrustedAccessRoleListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return AsyncItemPaged(get_next, extract_data)
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/models/__init__.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/models/__init__.py
new file mode 100644
index 00000000000..1a65b56f013
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/models/__init__.py
@@ -0,0 +1,506 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from ._patch import * # pylint: disable=unused-wildcard-import
+
+
+from ._models_py3 import ( # type: ignore
+ AbsoluteMonthlySchedule,
+ AdvancedNetworking,
+ AdvancedNetworkingObservability,
+ AdvancedNetworkingSecurity,
+ AgentPool,
+ AgentPoolAvailableVersions,
+ AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem,
+ AgentPoolDeleteMachinesParameter,
+ AgentPoolGatewayProfile,
+ AgentPoolListResult,
+ AgentPoolNetworkProfile,
+ AgentPoolSecurityProfile,
+ AgentPoolStatus,
+ AgentPoolUpgradeProfile,
+ AgentPoolUpgradeProfilePropertiesUpgradesItem,
+ AgentPoolUpgradeSettings,
+ AgentPoolWindowsProfile,
+ AzureKeyVaultKms,
+ ClusterUpgradeSettings,
+ CompatibleVersions,
+ ContainerServiceLinuxProfile,
+ ContainerServiceNetworkProfile,
+ ContainerServiceSshConfiguration,
+ ContainerServiceSshPublicKey,
+ CreationData,
+ CredentialResult,
+ CredentialResults,
+ DailySchedule,
+ DateSpan,
+ DelegatedResource,
+ EndpointDependency,
+ EndpointDetail,
+ ErrorAdditionalInfo,
+ ErrorDetail,
+ ErrorResponse,
+ ExtendedLocation,
+ GPUProfile,
+ IPTag,
+ IstioCertificateAuthority,
+ IstioComponents,
+ IstioEgressGateway,
+ IstioIngressGateway,
+ IstioPluginCertificateAuthority,
+ IstioServiceMesh,
+ KubeletConfig,
+ KubernetesPatchVersion,
+ KubernetesVersion,
+ KubernetesVersionCapabilities,
+ KubernetesVersionListResult,
+ LinuxOSConfig,
+ LocalDNSOverride,
+ LocalDNSProfile,
+ Machine,
+ MachineIpAddress,
+ MachineListResult,
+ MachineNetworkProperties,
+ MachineProperties,
+ MaintenanceConfiguration,
+ MaintenanceConfigurationListResult,
+ MaintenanceWindow,
+ ManagedCluster,
+ ManagedClusterAADProfile,
+ ManagedClusterAIToolchainOperatorProfile,
+ ManagedClusterAPIServerAccessProfile,
+ ManagedClusterAccessProfile,
+ ManagedClusterAddonProfile,
+ ManagedClusterAddonProfileIdentity,
+ ManagedClusterAgentPoolProfile,
+ ManagedClusterAgentPoolProfileProperties,
+ ManagedClusterAutoUpgradeProfile,
+ ManagedClusterAzureMonitorProfile,
+ ManagedClusterAzureMonitorProfileKubeStateMetrics,
+ ManagedClusterAzureMonitorProfileMetrics,
+ ManagedClusterBootstrapProfile,
+ ManagedClusterCostAnalysis,
+ ManagedClusterHTTPProxyConfig,
+ ManagedClusterIdentity,
+ ManagedClusterIngressProfile,
+ ManagedClusterIngressProfileNginx,
+ ManagedClusterIngressProfileWebAppRouting,
+ ManagedClusterListResult,
+ ManagedClusterLoadBalancerProfile,
+ ManagedClusterLoadBalancerProfileManagedOutboundIPs,
+ ManagedClusterLoadBalancerProfileOutboundIPPrefixes,
+ ManagedClusterLoadBalancerProfileOutboundIPs,
+ ManagedClusterManagedOutboundIPProfile,
+ ManagedClusterMetricsProfile,
+ ManagedClusterNATGatewayProfile,
+ ManagedClusterNodeProvisioningProfile,
+ ManagedClusterNodeResourceGroupProfile,
+ ManagedClusterOIDCIssuerProfile,
+ ManagedClusterPodIdentity,
+ ManagedClusterPodIdentityException,
+ ManagedClusterPodIdentityProfile,
+ ManagedClusterPodIdentityProvisioningError,
+ ManagedClusterPodIdentityProvisioningErrorBody,
+ ManagedClusterPodIdentityProvisioningInfo,
+ ManagedClusterPoolUpgradeProfile,
+ ManagedClusterPoolUpgradeProfileUpgradesItem,
+ ManagedClusterPropertiesAutoScalerProfile,
+ ManagedClusterSKU,
+ ManagedClusterSecurityProfile,
+ ManagedClusterSecurityProfileDefender,
+ ManagedClusterSecurityProfileDefenderSecurityMonitoring,
+ ManagedClusterSecurityProfileImageCleaner,
+ ManagedClusterSecurityProfileWorkloadIdentity,
+ ManagedClusterServicePrincipalProfile,
+ ManagedClusterStaticEgressGatewayProfile,
+ ManagedClusterStatus,
+ ManagedClusterStorageProfile,
+ ManagedClusterStorageProfileBlobCSIDriver,
+ ManagedClusterStorageProfileDiskCSIDriver,
+ ManagedClusterStorageProfileFileCSIDriver,
+ ManagedClusterStorageProfileSnapshotController,
+ ManagedClusterUpgradeProfile,
+ ManagedClusterWindowsProfile,
+ ManagedClusterWorkloadAutoScalerProfile,
+ ManagedClusterWorkloadAutoScalerProfileKeda,
+ ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler,
+ ManagedNamespace,
+ ManagedNamespaceListResult,
+ ManagedServiceIdentityUserAssignedIdentitiesValue,
+ ManualScaleProfile,
+ MeshRevision,
+ MeshRevisionProfile,
+ MeshRevisionProfileList,
+ MeshRevisionProfileProperties,
+ MeshUpgradeProfile,
+ MeshUpgradeProfileList,
+ MeshUpgradeProfileProperties,
+ NamespaceProperties,
+ NetworkPolicies,
+ OperationListResult,
+ OperationValue,
+ OutboundEnvironmentEndpoint,
+ OutboundEnvironmentEndpointCollection,
+ PortRange,
+ PowerState,
+ PrivateEndpoint,
+ PrivateEndpointConnection,
+ PrivateEndpointConnectionListResult,
+ PrivateLinkResource,
+ PrivateLinkResourcesListResult,
+ PrivateLinkServiceConnectionState,
+ ProxyResource,
+ RelativeMonthlySchedule,
+ Resource,
+ ResourceQuota,
+ ResourceReference,
+ RunCommandRequest,
+ RunCommandResult,
+ ScaleProfile,
+ Schedule,
+ ServiceMeshProfile,
+ Snapshot,
+ SnapshotListResult,
+ SubResource,
+ SysctlConfig,
+ SystemData,
+ TagsObject,
+ TimeInWeek,
+ TimeSpan,
+ TrackedResource,
+ TrustedAccessRole,
+ TrustedAccessRoleBinding,
+ TrustedAccessRoleBindingListResult,
+ TrustedAccessRoleListResult,
+ TrustedAccessRoleRule,
+ UpgradeOverrideSettings,
+ UserAssignedIdentity,
+ VirtualMachineNodes,
+ VirtualMachinesProfile,
+ WeeklySchedule,
+ WindowsGmsaProfile,
+)
+
+from ._container_service_client_enums import ( # type: ignore
+ AdoptionPolicy,
+ AdvancedNetworkPolicies,
+ AgentPoolMode,
+ AgentPoolSSHAccess,
+ AgentPoolType,
+ ArtifactSource,
+ BackendPoolType,
+ Code,
+ ConnectionStatus,
+ CreatedByType,
+ DeletePolicy,
+ Expander,
+ ExtendedLocationTypes,
+ Format,
+ GPUDriver,
+ GPUInstanceProfile,
+ IpFamily,
+ IstioIngressGatewayMode,
+ KeyVaultNetworkAccessTypes,
+ KubeletDiskType,
+ KubernetesSupportPlan,
+ LicenseType,
+ LoadBalancerSku,
+ LocalDNSForwardDestination,
+ LocalDNSForwardPolicy,
+ LocalDNSMode,
+ LocalDNSProtocol,
+ LocalDNSQueryLogging,
+ LocalDNSServeStale,
+ LocalDNSState,
+ ManagedClusterPodIdentityProvisioningState,
+ ManagedClusterSKUName,
+ ManagedClusterSKUTier,
+ NamespaceProvisioningState,
+ NetworkDataplane,
+ NetworkMode,
+ NetworkPlugin,
+ NetworkPluginMode,
+ NetworkPolicy,
+ NginxIngressControllerType,
+ NodeOSUpgradeChannel,
+ NodeProvisioningDefaultNodePools,
+ NodeProvisioningMode,
+ OSDiskType,
+ OSSKU,
+ OSType,
+ OutboundType,
+ PodIPAllocationMode,
+ PolicyRule,
+ PrivateEndpointConnectionProvisioningState,
+ Protocol,
+ PublicNetworkAccess,
+ ResourceIdentityType,
+ RestrictionLevel,
+ ScaleDownMode,
+ ScaleSetEvictionPolicy,
+ ScaleSetPriority,
+ ServiceMeshMode,
+ SnapshotType,
+ TrustedAccessRoleBindingProvisioningState,
+ Type,
+ UndrainableNodeBehavior,
+ UpgradeChannel,
+ WeekDay,
+ WorkloadRuntime,
+)
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+ "AbsoluteMonthlySchedule",
+ "AdvancedNetworking",
+ "AdvancedNetworkingObservability",
+ "AdvancedNetworkingSecurity",
+ "AgentPool",
+ "AgentPoolAvailableVersions",
+ "AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem",
+ "AgentPoolDeleteMachinesParameter",
+ "AgentPoolGatewayProfile",
+ "AgentPoolListResult",
+ "AgentPoolNetworkProfile",
+ "AgentPoolSecurityProfile",
+ "AgentPoolStatus",
+ "AgentPoolUpgradeProfile",
+ "AgentPoolUpgradeProfilePropertiesUpgradesItem",
+ "AgentPoolUpgradeSettings",
+ "AgentPoolWindowsProfile",
+ "AzureKeyVaultKms",
+ "ClusterUpgradeSettings",
+ "CompatibleVersions",
+ "ContainerServiceLinuxProfile",
+ "ContainerServiceNetworkProfile",
+ "ContainerServiceSshConfiguration",
+ "ContainerServiceSshPublicKey",
+ "CreationData",
+ "CredentialResult",
+ "CredentialResults",
+ "DailySchedule",
+ "DateSpan",
+ "DelegatedResource",
+ "EndpointDependency",
+ "EndpointDetail",
+ "ErrorAdditionalInfo",
+ "ErrorDetail",
+ "ErrorResponse",
+ "ExtendedLocation",
+ "GPUProfile",
+ "IPTag",
+ "IstioCertificateAuthority",
+ "IstioComponents",
+ "IstioEgressGateway",
+ "IstioIngressGateway",
+ "IstioPluginCertificateAuthority",
+ "IstioServiceMesh",
+ "KubeletConfig",
+ "KubernetesPatchVersion",
+ "KubernetesVersion",
+ "KubernetesVersionCapabilities",
+ "KubernetesVersionListResult",
+ "LinuxOSConfig",
+ "LocalDNSOverride",
+ "LocalDNSProfile",
+ "Machine",
+ "MachineIpAddress",
+ "MachineListResult",
+ "MachineNetworkProperties",
+ "MachineProperties",
+ "MaintenanceConfiguration",
+ "MaintenanceConfigurationListResult",
+ "MaintenanceWindow",
+ "ManagedCluster",
+ "ManagedClusterAADProfile",
+ "ManagedClusterAIToolchainOperatorProfile",
+ "ManagedClusterAPIServerAccessProfile",
+ "ManagedClusterAccessProfile",
+ "ManagedClusterAddonProfile",
+ "ManagedClusterAddonProfileIdentity",
+ "ManagedClusterAgentPoolProfile",
+ "ManagedClusterAgentPoolProfileProperties",
+ "ManagedClusterAutoUpgradeProfile",
+ "ManagedClusterAzureMonitorProfile",
+ "ManagedClusterAzureMonitorProfileKubeStateMetrics",
+ "ManagedClusterAzureMonitorProfileMetrics",
+ "ManagedClusterBootstrapProfile",
+ "ManagedClusterCostAnalysis",
+ "ManagedClusterHTTPProxyConfig",
+ "ManagedClusterIdentity",
+ "ManagedClusterIngressProfile",
+ "ManagedClusterIngressProfileNginx",
+ "ManagedClusterIngressProfileWebAppRouting",
+ "ManagedClusterListResult",
+ "ManagedClusterLoadBalancerProfile",
+ "ManagedClusterLoadBalancerProfileManagedOutboundIPs",
+ "ManagedClusterLoadBalancerProfileOutboundIPPrefixes",
+ "ManagedClusterLoadBalancerProfileOutboundIPs",
+ "ManagedClusterManagedOutboundIPProfile",
+ "ManagedClusterMetricsProfile",
+ "ManagedClusterNATGatewayProfile",
+ "ManagedClusterNodeProvisioningProfile",
+ "ManagedClusterNodeResourceGroupProfile",
+ "ManagedClusterOIDCIssuerProfile",
+ "ManagedClusterPodIdentity",
+ "ManagedClusterPodIdentityException",
+ "ManagedClusterPodIdentityProfile",
+ "ManagedClusterPodIdentityProvisioningError",
+ "ManagedClusterPodIdentityProvisioningErrorBody",
+ "ManagedClusterPodIdentityProvisioningInfo",
+ "ManagedClusterPoolUpgradeProfile",
+ "ManagedClusterPoolUpgradeProfileUpgradesItem",
+ "ManagedClusterPropertiesAutoScalerProfile",
+ "ManagedClusterSKU",
+ "ManagedClusterSecurityProfile",
+ "ManagedClusterSecurityProfileDefender",
+ "ManagedClusterSecurityProfileDefenderSecurityMonitoring",
+ "ManagedClusterSecurityProfileImageCleaner",
+ "ManagedClusterSecurityProfileWorkloadIdentity",
+ "ManagedClusterServicePrincipalProfile",
+ "ManagedClusterStaticEgressGatewayProfile",
+ "ManagedClusterStatus",
+ "ManagedClusterStorageProfile",
+ "ManagedClusterStorageProfileBlobCSIDriver",
+ "ManagedClusterStorageProfileDiskCSIDriver",
+ "ManagedClusterStorageProfileFileCSIDriver",
+ "ManagedClusterStorageProfileSnapshotController",
+ "ManagedClusterUpgradeProfile",
+ "ManagedClusterWindowsProfile",
+ "ManagedClusterWorkloadAutoScalerProfile",
+ "ManagedClusterWorkloadAutoScalerProfileKeda",
+ "ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler",
+ "ManagedNamespace",
+ "ManagedNamespaceListResult",
+ "ManagedServiceIdentityUserAssignedIdentitiesValue",
+ "ManualScaleProfile",
+ "MeshRevision",
+ "MeshRevisionProfile",
+ "MeshRevisionProfileList",
+ "MeshRevisionProfileProperties",
+ "MeshUpgradeProfile",
+ "MeshUpgradeProfileList",
+ "MeshUpgradeProfileProperties",
+ "NamespaceProperties",
+ "NetworkPolicies",
+ "OperationListResult",
+ "OperationValue",
+ "OutboundEnvironmentEndpoint",
+ "OutboundEnvironmentEndpointCollection",
+ "PortRange",
+ "PowerState",
+ "PrivateEndpoint",
+ "PrivateEndpointConnection",
+ "PrivateEndpointConnectionListResult",
+ "PrivateLinkResource",
+ "PrivateLinkResourcesListResult",
+ "PrivateLinkServiceConnectionState",
+ "ProxyResource",
+ "RelativeMonthlySchedule",
+ "Resource",
+ "ResourceQuota",
+ "ResourceReference",
+ "RunCommandRequest",
+ "RunCommandResult",
+ "ScaleProfile",
+ "Schedule",
+ "ServiceMeshProfile",
+ "Snapshot",
+ "SnapshotListResult",
+ "SubResource",
+ "SysctlConfig",
+ "SystemData",
+ "TagsObject",
+ "TimeInWeek",
+ "TimeSpan",
+ "TrackedResource",
+ "TrustedAccessRole",
+ "TrustedAccessRoleBinding",
+ "TrustedAccessRoleBindingListResult",
+ "TrustedAccessRoleListResult",
+ "TrustedAccessRoleRule",
+ "UpgradeOverrideSettings",
+ "UserAssignedIdentity",
+ "VirtualMachineNodes",
+ "VirtualMachinesProfile",
+ "WeeklySchedule",
+ "WindowsGmsaProfile",
+ "AdoptionPolicy",
+ "AdvancedNetworkPolicies",
+ "AgentPoolMode",
+ "AgentPoolSSHAccess",
+ "AgentPoolType",
+ "ArtifactSource",
+ "BackendPoolType",
+ "Code",
+ "ConnectionStatus",
+ "CreatedByType",
+ "DeletePolicy",
+ "Expander",
+ "ExtendedLocationTypes",
+ "Format",
+ "GPUDriver",
+ "GPUInstanceProfile",
+ "IpFamily",
+ "IstioIngressGatewayMode",
+ "KeyVaultNetworkAccessTypes",
+ "KubeletDiskType",
+ "KubernetesSupportPlan",
+ "LicenseType",
+ "LoadBalancerSku",
+ "LocalDNSForwardDestination",
+ "LocalDNSForwardPolicy",
+ "LocalDNSMode",
+ "LocalDNSProtocol",
+ "LocalDNSQueryLogging",
+ "LocalDNSServeStale",
+ "LocalDNSState",
+ "ManagedClusterPodIdentityProvisioningState",
+ "ManagedClusterSKUName",
+ "ManagedClusterSKUTier",
+ "NamespaceProvisioningState",
+ "NetworkDataplane",
+ "NetworkMode",
+ "NetworkPlugin",
+ "NetworkPluginMode",
+ "NetworkPolicy",
+ "NginxIngressControllerType",
+ "NodeOSUpgradeChannel",
+ "NodeProvisioningDefaultNodePools",
+ "NodeProvisioningMode",
+ "OSDiskType",
+ "OSSKU",
+ "OSType",
+ "OutboundType",
+ "PodIPAllocationMode",
+ "PolicyRule",
+ "PrivateEndpointConnectionProvisioningState",
+ "Protocol",
+ "PublicNetworkAccess",
+ "ResourceIdentityType",
+ "RestrictionLevel",
+ "ScaleDownMode",
+ "ScaleSetEvictionPolicy",
+ "ScaleSetPriority",
+ "ServiceMeshMode",
+ "SnapshotType",
+ "TrustedAccessRoleBindingProvisioningState",
+ "Type",
+ "UndrainableNodeBehavior",
+ "UpgradeChannel",
+ "WeekDay",
+ "WorkloadRuntime",
+]
+__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore
+_patch_sdk()
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/models/_container_service_client_enums.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/models/_container_service_client_enums.py
new file mode 100644
index 00000000000..8218304f1ba
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/models/_container_service_client_enums.py
@@ -0,0 +1,869 @@
+# pylint: disable=line-too-long,useless-suppression
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from enum import Enum
+from azure.core import CaseInsensitiveEnumMeta
+
+
+class AdoptionPolicy(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Action if Kubernetes namespace with same name already exists."""
+
+ NEVER = "Never"
+ """If the namespace already exists in Kubernetes, attempts to create that same namespace in ARM
+ will fail."""
+ IF_IDENTICAL = "IfIdentical"
+ """Take over the existing namespace to be managed by ARM, if there is no difference."""
+ ALWAYS = "Always"
+ """Always take over the existing namespace to be managed by ARM, some fields might be overwritten."""
+
+
+class AdvancedNetworkPolicies(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enable advanced network policies. This allows users to configure Layer 7 network policies
+ (FQDN, HTTP, Kafka). Policies themselves must be configured via the Cilium Network Policy
+ resources, see https://docs.cilium.io/en/latest/security/policy/index.html. This can be enabled
+ only on cilium-based clusters. If not specified, the default value is FQDN if security.enabled
+ is set to true.
+ """
+
+ L7 = "L7"
+ """Enable Layer7 network policies (FQDN, HTTP/S, Kafka). This option is a superset of the FQDN
+ option."""
+ FQDN = "FQDN"
+ """Enable FQDN based network policies"""
+ NONE = "None"
+ """Disable Layer 7 network policies (FQDN, HTTP/S, Kafka)"""
+
+
+class AgentPoolMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The mode of an agent pool. A cluster must have at least one 'System' Agent Pool at all times.
+ For additional information on agent pool restrictions and best practices, see:
+ https://docs.microsoft.com/azure/aks/use-system-pools.
+ """
+
+ SYSTEM = "System"
+ """System agent pools are primarily for hosting critical system pods such as CoreDNS and
+ metrics-server. System agent pools osType must be Linux. System agent pools VM SKU must have at
+ least 2vCPUs and 4GB of memory."""
+ USER = "User"
+ """User agent pools are primarily for hosting your application pods."""
+ GATEWAY = "Gateway"
+ """Gateway agent pools are dedicated to providing static egress IPs to pods. For more details, see
+ https://aka.ms/aks/static-egress-gateway."""
+
+
+class AgentPoolSSHAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """SSH access method of an agent pool."""
+
+ LOCAL_USER = "LocalUser"
+ """Can SSH onto the node as a local user using private key."""
+ DISABLED = "Disabled"
+ """SSH service will be turned off on the node."""
+
+
+class AgentPoolType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The type of Agent Pool."""
+
+ VIRTUAL_MACHINE_SCALE_SETS = "VirtualMachineScaleSets"
+ """Create an Agent Pool backed by a Virtual Machine Scale Set."""
+ AVAILABILITY_SET = "AvailabilitySet"
+ """Use of this is strongly discouraged."""
+ VIRTUAL_MACHINES = "VirtualMachines"
+ """Create an Agent Pool backed by a Single Instance VM orchestration mode."""
+
+
+class ArtifactSource(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The artifact source. The source where the artifacts are downloaded from."""
+
+ CACHE = "Cache"
+ """pull images from Azure Container Registry with cache"""
+ DIRECT = "Direct"
+ """pull images from Microsoft Artifact Registry"""
+
+
+class BackendPoolType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The type of the managed inbound Load Balancer BackendPool."""
+
+ NODE_IP_CONFIGURATION = "NodeIPConfiguration"
+ """The type of the managed inbound Load Balancer BackendPool.
+ https://cloud-provider-azure.sigs.k8s.io/topics/loadbalancer/#configure-load-balancer-backend."""
+ NODE_IP = "NodeIP"
+ """The type of the managed inbound Load Balancer BackendPool.
+ https://cloud-provider-azure.sigs.k8s.io/topics/loadbalancer/#configure-load-balancer-backend."""
+
+
+class Code(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Tells whether the cluster is Running or Stopped."""
+
+ RUNNING = "Running"
+ """The cluster is running."""
+ STOPPED = "Stopped"
+ """The cluster is stopped."""
+
+
+class ConnectionStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The private link service connection status."""
+
+ PENDING = "Pending"
+ APPROVED = "Approved"
+ REJECTED = "Rejected"
+ DISCONNECTED = "Disconnected"
+
+
+class CreatedByType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The type of identity that created the resource."""
+
+ USER = "User"
+ APPLICATION = "Application"
+ MANAGED_IDENTITY = "ManagedIdentity"
+ KEY = "Key"
+
+
+class DeletePolicy(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Delete options of a namespace."""
+
+ KEEP = "Keep"
+ """Only delete the ARM resource, keep the Kubernetes namespace. Also delete the ManagedByARM
+ label."""
+ DELETE = "Delete"
+ """Delete both the ARM resource and the Kubernetes namespace together."""
+
+
+class Expander(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The expander to use when scaling up. If not specified, the default is 'random'. See `expanders
+ <https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-expanders>`_
+ for more information.
+ """
+
+ LEAST_WASTE = "least-waste"
+ """Selects the node group that will have the least idle CPU (if tied, unused memory) after
+ scale-up. This is useful when you have different classes of nodes, for example, high CPU or
+ high memory nodes, and only want to expand those when there are pending pods that need a lot of
+ those resources."""
+ MOST_PODS = "most-pods"
+ """Selects the node group that would be able to schedule the most pods when scaling up. This is
+ useful when you are using nodeSelector to make sure certain pods land on certain nodes. Note
+ that this won't cause the autoscaler to select bigger nodes vs. smaller, as it can add multiple
+ smaller nodes at once."""
+ PRIORITY = "priority"
+ """Selects the node group that has the highest priority assigned by the user. Its configuration
+ is described in more details `here
+ <https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/expander/priority/readme.md>`_."""
+ RANDOM = "random"
+ """Used when you don't have a particular need for the node groups to scale differently."""
+
+
+class ExtendedLocationTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The type of extendedLocation."""
+
+ EDGE_ZONE = "EdgeZone"
+
+
+class Format(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Format."""
+
+ AZURE = "azure"
+ """Return azure auth-provider kubeconfig. This format is deprecated in v1.22 and will be fully
+ removed in v1.26. See: https://aka.ms/k8s/changes-1-26."""
+ EXEC = "exec"
+ """Return exec format kubeconfig. This format requires kubelogin binary in the path."""
+ EXEC_ENUM = "exec"
+ """Return exec format kubeconfig. This format requires kubelogin binary in the path."""
+
+
+class GPUDriver(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Whether to install GPU drivers. When it's not specified, default is Install."""
+
+ INSTALL = "Install"
+ """Install driver."""
+ NONE = "None"
+ """Skip driver install."""
+
+
+class GPUInstanceProfile(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU."""
+
+ MIG1_G = "MIG1g"
+ MIG2_G = "MIG2g"
+ MIG3_G = "MIG3g"
+ MIG4_G = "MIG4g"
+ MIG7_G = "MIG7g"
+
+
+class IpFamily(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The IP version to use for cluster networking and IP assignment."""
+
+ I_PV4 = "IPv4"
+ I_PV6 = "IPv6"
+
+
+class IstioIngressGatewayMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Mode of an ingress gateway."""
+
+ EXTERNAL = "External"
+ """The ingress gateway is assigned a public IP address and is publicly accessible."""
+ INTERNAL = "Internal"
+ """The ingress gateway is assigned an internal IP address and cannot be accessed publicly."""
+
+
+class KeyVaultNetworkAccessTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Network access of the key vault. Network access of key vault. The possible values are
+ ``Public`` and ``Private``. ``Public`` means the key vault allows public access from all
+ networks. ``Private`` means the key vault disables public access and enables private link. The
+ default value is ``Public``.
+ """
+
+ PUBLIC = "Public"
+ PRIVATE = "Private"
+
+
+class KubeletDiskType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Determines the placement of emptyDir volumes, container runtime data root, and Kubelet
+ ephemeral storage.
+ """
+
+ OS = "OS"
+ """Kubelet will use the OS disk for its data."""
+ TEMPORARY = "Temporary"
+ """Kubelet will use the temporary disk for its data."""
+
+
+class KubernetesSupportPlan(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Different support tiers for AKS managed clusters."""
+
+ KUBERNETES_OFFICIAL = "KubernetesOfficial"
+ """Support for the version is the same as for the open source Kubernetes offering. Official
+ Kubernetes open source community support versions for 1 year after release."""
+ AKS_LONG_TERM_SUPPORT = "AKSLongTermSupport"
+ """Support for the version extended past the KubernetesOfficial support of 1 year. AKS continues
+ to patch CVEs for another 1 year, for a total of 2 years of support."""
+
+
+class LicenseType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The license type to use for Windows VMs. See `Azure Hybrid User Benefits
+ <https://azure.microsoft.com/pricing/hybrid-benefit/faq/>`_ for more details.
+ """
+
+ NONE = "None"
+ """No additional licensing is applied."""
+ WINDOWS_SERVER = "Windows_Server"
+ """Enables Azure Hybrid User Benefits for Windows VMs."""
+
+
+class LoadBalancerSku(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The load balancer sku for the managed cluster. The default is 'standard'. See `Azure Load
+ Balancer SKUs <https://docs.microsoft.com/azure/load-balancer/skus>`_ for more information
+ about the differences between load balancer SKUs.
+ """
+
+ STANDARD = "standard"
+ """Use a standard Load Balancer. This is the recommended Load Balancer SKU. For more information
+ about working with the load balancer in the managed cluster, see the `standard Load Balancer
+ <https://docs.microsoft.com/azure/load-balancer/load-balancer-standard>`_ article."""
+ BASIC = "basic"
+ """Use a basic Load Balancer with limited functionality."""
+
+
+class LocalDNSForwardDestination(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Destination server for DNS queries to be forwarded from localDNS."""
+
+ CLUSTER_CORE_DNS = "ClusterCoreDNS"
+ """Forward DNS queries from localDNS to cluster CoreDNS."""
+ VNET_DNS = "VnetDNS"
+ """Forward DNS queries from localDNS to DNS server configured in the VNET. A VNET can have
+ multiple DNS servers configured."""
+
+
+class LocalDNSForwardPolicy(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Forward policy for selecting upstream DNS server. See `forward plugin
+ <https://coredns.io/plugins/forward/>`_ for more information.
+ """
+
+ SEQUENTIAL = "Sequential"
+ """Implements sequential upstream DNS server selection. See `forward plugin
+ <https://coredns.io/plugins/forward/>`_ for more information."""
+ ROUND_ROBIN = "RoundRobin"
+ """Implements round robin upstream DNS server selection. See `forward plugin
+ <https://coredns.io/plugins/forward/>`_ for more information."""
+ RANDOM = "Random"
+ """Implements random upstream DNS server selection. See `forward plugin
+ <https://coredns.io/plugins/forward/>`_ for more information."""
+
+
+class LocalDNSMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Mode of enablement for localDNS."""
+
+ PREFERRED = "Preferred"
+ """If the current orchestrator version supports this feature, prefer enabling localDNS."""
+ REQUIRED = "Required"
+ """Enable localDNS."""
+ DISABLED = "Disabled"
+ """Disable localDNS."""
+
+
+class LocalDNSProtocol(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enforce TCP or prefer UDP protocol for connections from localDNS to upstream DNS server."""
+
+ PREFER_UDP = "PreferUDP"
+ """Prefer UDP protocol for connections from localDNS to upstream DNS server."""
+ FORCE_TCP = "ForceTCP"
+ """Enforce TCP protocol for connections from localDNS to upstream DNS server."""
+
+
+class LocalDNSQueryLogging(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Log level for DNS queries in localDNS."""
+
+ ERROR = "Error"
+ """Enables error logging in localDNS. See `errors plugin `_ for
+ more information."""
+ LOG = "Log"
+ """Enables query logging in localDNS. See `log plugin `_ for more
+ information."""
+
+
+class LocalDNSServeStale(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Policy for serving stale data. See `cache plugin `_ for more
+ information.
+ """
+
+ VERIFY = "Verify"
+ """Serve stale data with verification. First verify that an entry is still unavailable from the
+ source before sending the expired entry to the client. See `cache plugin
+ `_ for more information."""
+ IMMEDIATE = "Immediate"
+ """Serve stale data immediately. Send the expired entry to the client before checking to see if
+ the entry is available from the source. See `cache plugin `_
+ for more information."""
+ DISABLE = "Disable"
+ """Disable serving stale data."""
+
+
+class LocalDNSState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """System-generated state of localDNS."""
+
+ ENABLED = "Enabled"
+ """localDNS is enabled."""
+ DISABLED = "Disabled"
+ """localDNS is disabled."""
+
+
+class ManagedClusterPodIdentityProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The current provisioning state of the pod identity."""
+
+ ASSIGNED = "Assigned"
+ CANCELED = "Canceled"
+ DELETING = "Deleting"
+ FAILED = "Failed"
+ SUCCEEDED = "Succeeded"
+ UPDATING = "Updating"
+
+
+class ManagedClusterSKUName(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The name of a managed cluster SKU."""
+
+ BASE = "Base"
+ """Base option for the AKS control plane."""
+ AUTOMATIC = "Automatic"
+ """Automatic clusters are optimized to run most production workloads with configuration that
+ follows AKS best practices and recommendations for cluster and workload setup, scalability, and
+ security. For more details about Automatic clusters see aka.ms/aks/automatic."""
+
+
+class ManagedClusterSKUTier(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The tier of a managed cluster SKU. If not specified, the default is 'Free'. See `AKS Pricing
+ Tier `_ for more details.
+ """
+
+ PREMIUM = "Premium"
+ """Cluster has premium capabilities in addition to all of the capabilities included in 'Standard'.
+ Premium enables selection of LongTermSupport (aka.ms/aks/lts) for certain Kubernetes versions."""
+ STANDARD = "Standard"
+ """Recommended for mission-critical and production workloads. Includes Kubernetes control plane
+ autoscaling, workload-intensive testing, and up to 5,000 nodes per cluster. Guarantees 99.95%
+ availability of the Kubernetes API server endpoint for clusters that use Availability Zones and
+ 99.9% of availability for clusters that don't use Availability Zones."""
+ FREE = "Free"
+ """The cluster management is free, but charged for VM, storage, and networking usage. Best for
+ experimenting, learning, simple testing, or workloads with fewer than 10 nodes. Not recommended
+ for production use cases."""
+
+
+class NamespaceProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The current provisioning state of the namespace."""
+
+ UPDATING = "Updating"
+ DELETING = "Deleting"
+ CREATING = "Creating"
+ SUCCEEDED = "Succeeded"
+ FAILED = "Failed"
+ CANCELED = "Canceled"
+
+
+class NetworkDataplane(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Network dataplane used in the Kubernetes cluster."""
+
+ AZURE = "azure"
+ """Use Azure network dataplane."""
+ CILIUM = "cilium"
+ """Use Cilium network dataplane. See `Azure CNI Powered by Cilium
+ `_ for more information."""
+
+
+class NetworkMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The network mode Azure CNI is configured with. This cannot be specified if networkPlugin is
+ anything other than 'azure'.
+ """
+
+ TRANSPARENT = "transparent"
+ """No bridge is created. Intra-VM Pod to Pod communication is through IP routes created by Azure
+ CNI. See `Transparent Mode `_ for
+ more information."""
+ BRIDGE = "bridge"
+ """This is no longer supported"""
+
+
+class NetworkPlugin(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Network plugin used for building the Kubernetes network."""
+
+ AZURE = "azure"
+ """Use the Azure CNI network plugin. See `Azure CNI (advanced) networking
+ `_ for
+ more information."""
+ KUBENET = "kubenet"
+ """Use the Kubenet network plugin. See `Kubenet (basic) networking
+ `_ for more
+ information."""
+ NONE = "none"
+ """No CNI plugin is pre-installed. See `BYO CNI
+ `_ for more information."""
+
+
+class NetworkPluginMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The mode the network plugin should use."""
+
+ OVERLAY = "overlay"
+ """Used with networkPlugin=azure, pods are given IPs from the PodCIDR address space but use Azure
+ Routing Domains rather than Kubenet's method of route tables. For more information visit
+ https://aka.ms/aks/azure-cni-overlay."""
+
+
+class NetworkPolicy(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Network policy used for building the Kubernetes network."""
+
+ NONE = "none"
+ """Network policies will not be enforced. This is the default value when NetworkPolicy is not
+ specified."""
+ CALICO = "calico"
+ """Use Calico network policies. See `differences between Azure and Calico policies
+ `_
+ for more information."""
+ AZURE = "azure"
+ """Use Azure network policies. See `differences between Azure and Calico policies
+ `_
+ for more information."""
+ CILIUM = "cilium"
+ """Use Cilium to enforce network policies. This requires networkDataplane to be 'cilium'."""
+
+
+class NginxIngressControllerType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Ingress type for the default NginxIngressController custom resource."""
+
+ ANNOTATION_CONTROLLED = "AnnotationControlled"
+ """The default NginxIngressController will be created. Users can edit the default
+ NginxIngressController Custom Resource to configure load balancer annotations."""
+ EXTERNAL = "External"
+ """The default NginxIngressController will be created and the operator will provision an external
+ loadbalancer with it. Any annotation to make the default loadbalancer internal will be
+ overwritten."""
+ INTERNAL = "Internal"
+ """The default NginxIngressController will be created and the operator will provision an internal
+ loadbalancer with it. Any annotation to make the default loadbalancer external will be
+ overwritten."""
+ NONE = "None"
+ """The default Ingress Controller will not be created. It will not be deleted by the system if it
+ exists. Users should delete the default NginxIngressController Custom Resource manually if
+ desired."""
+
+
+class NodeOSUpgradeChannel(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Node OS Upgrade Channel. Manner in which the OS on your nodes is updated. The default is
+ NodeImage.
+ """
+
+ NONE = "None"
+ """No attempt to update your machines' OS will be made either by OS or by rolling VHDs. This means
+ you are responsible for your security updates"""
+ UNMANAGED = "Unmanaged"
+ """OS updates will be applied automatically through the OS built-in patching infrastructure. Newly
+ scaled in machines will be unpatched initially and will be patched at some point by the OS's
+ infrastructure. Behavior of this option depends on the OS in question. Ubuntu and Mariner apply
+ security patches through unattended upgrade roughly once a day around 06:00 UTC. Windows does
+ not apply security patches automatically and so for them this option is equivalent to None till
+ further notice"""
+ NODE_IMAGE = "NodeImage"
+ """AKS will update the nodes with a newly patched VHD containing security fixes and bugfixes on a
+ weekly cadence. With the VHD update machines will be rolling reimaged to that VHD following
+ maintenance windows and surge settings. No extra VHD cost is incurred when choosing this option
+ as AKS hosts the images."""
+ SECURITY_PATCH = "SecurityPatch"
+ """AKS downloads and updates the nodes with tested security updates. These updates honor the
+ maintenance window settings and produce a new VHD that is used on new nodes. On some occasions
+ it's not possible to apply the updates in place, in such cases the existing nodes will also be
+ re-imaged to the newly produced VHD in order to apply the changes. This option incurs an extra
+ cost of hosting the new Security Patch VHDs in your resource group for just in time
+ consumption."""
+
+
+class NodeProvisioningDefaultNodePools(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The set of default Karpenter NodePools (CRDs) configured for node provisioning. This field has
+ no effect unless mode is 'Auto'. Warning: Changing this from Auto to None on an existing
+ cluster will cause the default Karpenter NodePools to be deleted, which will drain and delete
+ the nodes associated with those pools. It is strongly recommended to not do this unless there
+ are idle nodes ready to take the pods evicted by that action. If not specified, the default is
+ Auto. For more information see aka.ms/aks/nap#node-pools.
+ """
+
+ NONE = "None"
+ """No Karpenter NodePools are provisioned automatically. Automatic scaling will not happen unless
+ the user creates one or more NodePool CRD instances."""
+ AUTO = "Auto"
+ """A standard set of Karpenter NodePools are provisioned"""
+
+
+class NodeProvisioningMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The node provisioning mode. If not specified, the default is Manual."""
+
+ MANUAL = "Manual"
+ """Nodes are provisioned manually by the user"""
+ AUTO = "Auto"
+ """Nodes are provisioned automatically by AKS using Karpenter (See aka.ms/aks/nap for more
+ details). Fixed size Node Pools can still be created, but autoscaling Node Pools cannot be.
+ (See aka.ms/aks/nap for more details)."""
+
+
+class OSDiskType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The OS disk type to be used for machines in the agent pool. The default is 'Ephemeral' if the
+ VM supports it and has a cache disk larger than the requested OSDiskSizeGB. Otherwise, defaults
+ to 'Managed'. May not be changed after creation. For more information see `Ephemeral OS
+ `_.
+ """
+
+ MANAGED = "Managed"
+ """Azure replicates the operating system disk for a virtual machine to Azure storage to avoid data
+ loss should the VM need to be relocated to another host. Since containers aren't designed to
+ have local state persisted, this behavior offers limited value while providing some drawbacks,
+ including slower node provisioning and higher read/write latency."""
+ EPHEMERAL = "Ephemeral"
+ """Ephemeral OS disks are stored only on the host machine, just like a temporary disk. This
+ provides lower read/write latency, along with faster node scaling and cluster upgrades."""
+
+
+class OSSKU(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is Linux. The
+ default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >= 1.25 if OSType
+ is Windows.
+ """
+
+ UBUNTU = "Ubuntu"
+ """Use Ubuntu as the OS for node images."""
+ AZURE_LINUX = "AzureLinux"
+ """Use AzureLinux as the OS for node images. Azure Linux is a container-optimized Linux distro
+ built by Microsoft, visit https://aka.ms/azurelinux for more information."""
+ AZURE_LINUX3 = "AzureLinux3"
+ """Use AzureLinux3 as the OS for node images. Azure Linux is a container-optimized Linux distro
+ built by Microsoft, visit https://aka.ms/azurelinux for more information. For limitations,
+ visit https://aka.ms/aks/node-images. For OS migration guidance, see
+ https://aka.ms/aks/upgrade-os-version."""
+ CBL_MARINER = "CBLMariner"
+ """Deprecated OSSKU. Microsoft recommends that new deployments choose 'AzureLinux' instead."""
+ WINDOWS2019 = "Windows2019"
+ """Use Windows2019 as the OS for node images. Unsupported for system node pools. Windows2019 only
+ supports Windows2019 containers; it cannot run Windows2022 containers and vice versa."""
+ WINDOWS2022 = "Windows2022"
+ """Use Windows2022 as the OS for node images. Unsupported for system node pools. Windows2022 only
+ supports Windows2022 containers; it cannot run Windows2019 containers and vice versa."""
+ UBUNTU2204 = "Ubuntu2204"
+ """Use Ubuntu2204 as the OS for node images, however, Ubuntu 22.04 may not be supported for all
+ nodepools. For limitations and supported kubernetes versions, see
+ https://aka.ms/aks/supported-ubuntu-versions"""
+ UBUNTU2404 = "Ubuntu2404"
+ """Use Ubuntu2404 as the OS for node images, however, Ubuntu 24.04 may not be supported for all
+ nodepools. For limitations and supported kubernetes versions, see
+ https://aka.ms/aks/supported-ubuntu-versions"""
+
+
+class OSType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The operating system type. The default is Linux."""
+
+ LINUX = "Linux"
+ """Use Linux."""
+ WINDOWS = "Windows"
+ """Use Windows."""
+
+
+class OutboundType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The outbound (egress) routing method. This can only be set at cluster creation time and cannot
+ be changed later. For more information see `egress outbound type
+ `_.
+ """
+
+ LOAD_BALANCER = "loadBalancer"
+ """The load balancer is used for egress through an AKS assigned public IP. This supports
+ Kubernetes services of type 'loadBalancer'. For more information see `outbound type
+ loadbalancer
+ `_."""
+ USER_DEFINED_ROUTING = "userDefinedRouting"
+ """Egress paths must be defined by the user. This is an advanced scenario and requires proper
+ network configuration. For more information see `outbound type userDefinedRouting
+ `_."""
+ MANAGED_NAT_GATEWAY = "managedNATGateway"
+ """The AKS-managed NAT gateway is used for egress."""
+ USER_ASSIGNED_NAT_GATEWAY = "userAssignedNATGateway"
+ """The user-assigned NAT gateway associated to the cluster subnet is used for egress. This is an
+ advanced scenario and requires proper network configuration."""
+ NONE = "none"
+ """The AKS cluster is not set with any outbound-type. All AKS nodes follow Azure VM default
+ outbound behavior. Please refer to
+ https://azure.microsoft.com/en-us/updates/default-outbound-access-for-vms-in-azure-will-be-retired-transition-to-a-new-method-of-internet-access/"""
+
+
+class PodIPAllocationMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Pod IP Allocation Mode. The IP allocation mode for pods in the agent pool. Must be used with
+ podSubnetId. The default is 'DynamicIndividual'.
+ """
+
+ DYNAMIC_INDIVIDUAL = "DynamicIndividual"
+ """Each node gets allocated with a non-contiguous list of IP addresses assignable to pods. This is
+ better for maximizing a small to medium subnet of size /16 or smaller. The Azure CNI cluster
+ with dynamic IP allocation defaults to this mode if the customer does not explicitly specify a
+ podIPAllocationMode"""
+ STATIC_BLOCK = "StaticBlock"
+ """Each node is statically allocated CIDR block(s) of size /28 = 16 IPs per block to satisfy the
+ maxPods per node. Number of CIDR blocks >= (maxPods / 16). The block, rather than a single IP,
+ counts against the Azure Vnet Private IP limit of 65K. Therefore block mode is suitable for
+ running larger workloads with more than the current limit of 65K pods in a cluster. This mode
+ is better suited to scale with larger subnets of /15 or bigger"""
+
+
+class PolicyRule(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Enum representing different network policy rules."""
+
+ DENY_ALL = "DenyAll"
+ """Deny all network traffic."""
+ ALLOW_ALL = "AllowAll"
+ """Allow all network traffic."""
+ ALLOW_SAME_NAMESPACE = "AllowSameNamespace"
+ """Allow traffic within the same namespace."""
+
+
+class PrivateEndpointConnectionProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The current provisioning state."""
+
+ CANCELED = "Canceled"
+ CREATING = "Creating"
+ DELETING = "Deleting"
+ FAILED = "Failed"
+ SUCCEEDED = "Succeeded"
+
+
+class Protocol(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The network protocol of the port."""
+
+ TCP = "TCP"
+ """TCP protocol."""
+ UDP = "UDP"
+ """UDP protocol."""
+
+
+class PublicNetworkAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """PublicNetworkAccess of the managedCluster. Allow or deny public network access for AKS."""
+
+ ENABLED = "Enabled"
+ DISABLED = "Disabled"
+
+
+class ResourceIdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The type of identity used for the managed cluster. For more information see `use managed
+ identities in AKS `_.
+ """
+
+ SYSTEM_ASSIGNED = "SystemAssigned"
+ """Use an implicitly created system assigned managed identity to manage cluster resources. Master
+ components in the control plane such as kube-controller-manager will use the system assigned
+ managed identity to manipulate Azure resources."""
+ USER_ASSIGNED = "UserAssigned"
+ """Use a user-specified identity to manage cluster resources. Master components in the control
+ plane such as kube-controller-manager will use the specified user assigned managed identity to
+ manipulate Azure resources."""
+ NONE = "None"
+ """Do not use a managed identity for the Managed Cluster, service principal will be used instead."""
+
+
+class RestrictionLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The restriction level applied to the cluster's node resource group. If not specified, the
+ default is 'Unrestricted'.
+ """
+
+ UNRESTRICTED = "Unrestricted"
+ """All RBAC permissions are allowed on the managed node resource group"""
+ READ_ONLY = "ReadOnly"
+ """Only */read RBAC permissions allowed on the managed node resource group"""
+
+
+class ScaleDownMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Describes how VMs are added to or removed from Agent Pools. See `billing states
+ `_.
+ """
+
+ DELETE = "Delete"
+ """Create new instances during scale up and remove instances during scale down."""
+ DEALLOCATE = "Deallocate"
+ """Attempt to start deallocated instances (if they exist) during scale up and deallocate instances
+ during scale down."""
+
+
+class ScaleSetEvictionPolicy(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The Virtual Machine Scale Set eviction policy. The eviction policy specifies what to do with
+ the VM when it is evicted. The default is Delete. For more information about eviction see `spot
+ VMs `_.
+ """
+
+ DELETE = "Delete"
+ """Nodes in the underlying Scale Set of the node pool are deleted when they're evicted."""
+ DEALLOCATE = "Deallocate"
+ """Nodes in the underlying Scale Set of the node pool are set to the stopped-deallocated state
+ upon eviction. Nodes in the stopped-deallocated state count against your compute quota and can
+ cause issues with cluster scaling or upgrading."""
+
+
+class ScaleSetPriority(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The Virtual Machine Scale Set priority."""
+
+ SPOT = "Spot"
+ """Spot priority VMs will be used. There is no SLA for spot nodes. See `spot on AKS
+ `_ for more information."""
+ REGULAR = "Regular"
+ """Regular VMs will be used."""
+
+
+class ServiceMeshMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Mode of the service mesh."""
+
+ ISTIO = "Istio"
+ """Istio deployed as an AKS addon."""
+ DISABLED = "Disabled"
+ """Mesh is disabled."""
+
+
+class SnapshotType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The type of a snapshot. The default is NodePool."""
+
+ NODE_POOL = "NodePool"
+ """The snapshot is a snapshot of a node pool."""
+
+
+class TrustedAccessRoleBindingProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The current provisioning state of trusted access role binding."""
+
+ CANCELED = "Canceled"
+ DELETING = "Deleting"
+ FAILED = "Failed"
+ SUCCEEDED = "Succeeded"
+ UPDATING = "Updating"
+
+
+class Type(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The week index. Specifies on which week of the month the dayOfWeek applies."""
+
+ FIRST = "First"
+ """First week of the month."""
+ SECOND = "Second"
+ """Second week of the month."""
+ THIRD = "Third"
+ """Third week of the month."""
+ FOURTH = "Fourth"
+ """Fourth week of the month."""
+ LAST = "Last"
+ """Last week of the month."""
+
+
+class UndrainableNodeBehavior(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Defines the behavior for undrainable nodes during upgrade. The most common cause of undrainable
+ nodes is Pod Disruption Budgets (PDBs), but other issues, such as pod termination grace period
+ is exceeding the remaining per-node drain timeout or pod is still being in a running state, can
+ also cause undrainable nodes.
+ """
+
+ CORDON = "Cordon"
+ """AKS will cordon the blocked nodes and replace them with surge nodes during upgrade. The blocked
+ nodes will be cordoned and replaced by surge nodes. The blocked nodes will have label
+ 'kubernetes.azure.com/upgrade-status:Quarantined'. A surge node will be retained for each
+ blocked node. A best-effort attempt will be made to delete all other surge nodes. If there are
+ enough surge nodes to replace blocked nodes, then the upgrade operation and the managed cluster
+ will be in failed state. Otherwise, the upgrade operation and the managed cluster will be in
+ canceled state."""
+ SCHEDULE = "Schedule"
+ """AKS will mark the blocked nodes schedulable, but the blocked nodes are not upgraded. A
+ best-effort attempt will be made to delete all surge nodes. The upgrade operation and the
+ managed cluster will be in failed state if there are any blocked nodes."""
+
+
+class UpgradeChannel(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The upgrade channel for auto upgrade. The default is 'none'. For more information see `setting
+ the AKS cluster auto-upgrade channel
+ `_.
+ """
+
+ RAPID = "rapid"
+ """Automatically upgrade the cluster to the latest supported patch release on the latest supported
+ minor version. In cases where the cluster is at a version of Kubernetes that is at an N-2 minor
+ version where N is the latest supported minor version, the cluster first upgrades to the latest
+ supported patch version on N-1 minor version. For example, if a cluster is running version
+ 1.17.7 and versions 1.17.9, 1.18.4, 1.18.6, and 1.19.1 are available, your cluster first is
+ upgraded to 1.18.6, then is upgraded to 1.19.1."""
+ STABLE = "stable"
+ """Automatically upgrade the cluster to the latest supported patch release on minor version N-1,
+ where N is the latest supported minor version. For example, if a cluster is running version
+ 1.17.7 and versions 1.17.9, 1.18.4, 1.18.6, and 1.19.1 are available, your cluster is upgraded
+ to 1.18.6."""
+ PATCH = "patch"
+ """Automatically upgrade the cluster to the latest supported patch version when it becomes
+ available while keeping the minor version the same. For example, if a cluster is running
+ version 1.17.7 and versions 1.17.9, 1.18.4, 1.18.6, and 1.19.1 are available, your cluster is
+ upgraded to 1.17.9."""
+ NODE_IMAGE = "node-image"
+ """Automatically upgrade the node image to the latest version available. Consider using
+ nodeOSUpgradeChannel instead as that allows you to configure node OS patching separate from
+ Kubernetes version patching"""
+ NONE = "none"
+ """Disables auto-upgrades and keeps the cluster at its current version of Kubernetes."""
+
+
+class WeekDay(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The weekday enum."""
+
+ SUNDAY = "Sunday"
+ MONDAY = "Monday"
+ TUESDAY = "Tuesday"
+ WEDNESDAY = "Wednesday"
+ THURSDAY = "Thursday"
+ FRIDAY = "Friday"
+ SATURDAY = "Saturday"
+
+
+class WorkloadRuntime(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Determines the type of workload a node can run."""
+
+ OCI_CONTAINER = "OCIContainer"
+ """Nodes will use Kubelet to run standard OCI container workloads."""
+ WASM_WASI = "WasmWasi"
+ """Nodes will use Krustlet to run WASM workloads using the WASI provider (Preview)."""
+ KATA_VM_ISOLATION = "KataVmIsolation"
+ """Nodes can use (Kata + Cloud Hypervisor + Hyper-V) to enable Nested VM-based pods. Due to the
+ use of Hyper-V, the AKS node OS itself is a nested VM (the root OS) of Hyper-V. Thus it can only be
+ used with VM series that support Nested Virtualization such as Dv3 series."""
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/models/_models_py3.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/models/_models_py3.py
new file mode 100644
index 00000000000..9bcc668831d
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/models/_models_py3.py
@@ -0,0 +1,9636 @@
+# pylint: disable=line-too-long,useless-suppression,too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from collections.abc import MutableMapping
+import datetime
+from typing import Any, Optional, TYPE_CHECKING, Union
+
+from .._utils import serialization as _serialization
+
+if TYPE_CHECKING:
+ from .. import models as _models
+JSON = MutableMapping[str, Any]
+
+
+class AbsoluteMonthlySchedule(_serialization.Model):
+    """For schedules like: 'recur every month on the 15th' or 'recur every 3 months on the 20th'.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar interval_months: Specifies the number of months between each set of occurrences.
+     Required.
+    :vartype interval_months: int
+    :ivar day_of_month: The date of the month. Required.
+    :vartype day_of_month: int
+    """
+
+    # Constraints checked by the serialization layer before a request is sent:
+    # the recurrence interval is 1-6 months, the day of month is 1-31.
+    _validation = {
+        "interval_months": {"required": True, "maximum": 6, "minimum": 1},
+        "day_of_month": {"required": True, "maximum": 31, "minimum": 1},
+    }
+
+    # Maps Python attribute names to their REST API wire names and types.
+    _attribute_map = {
+        "interval_months": {"key": "intervalMonths", "type": "int"},
+        "day_of_month": {"key": "dayOfMonth", "type": "int"},
+    }
+
+    def __init__(self, *, interval_months: int, day_of_month: int, **kwargs: Any) -> None:
+        """
+        :keyword interval_months: Specifies the number of months between each set of occurrences.
+         Required.
+        :paramtype interval_months: int
+        :keyword day_of_month: The date of the month. Required.
+        :paramtype day_of_month: int
+        """
+        super().__init__(**kwargs)
+        self.interval_months = interval_months
+        self.day_of_month = day_of_month
+
+
+class AdvancedNetworking(_serialization.Model):
+    """Advanced Networking profile for enabling observability and security feature suite on a
+    cluster. For more information see aka.ms/aksadvancednetworking.
+
+    :ivar enabled: Indicates the enablement of Advanced Networking functionalities of observability
+     and security on AKS clusters. When this is set to true, all observability and security
+     features will be set to enabled unless explicitly disabled. If not specified, the default is
+     false.
+    :vartype enabled: bool
+    :ivar observability: Observability profile to enable advanced network metrics and flow logs
+     with historical contexts.
+    :vartype observability: ~azure.mgmt.containerservice.models.AdvancedNetworkingObservability
+    :ivar security: Security profile to enable security features on cilium based cluster.
+    :vartype security: ~azure.mgmt.containerservice.models.AdvancedNetworkingSecurity
+    """
+
+    # Maps Python attribute names to REST API wire names; "observability" and
+    # "security" are (de)serialized as nested sub-models.
+    _attribute_map = {
+        "enabled": {"key": "enabled", "type": "bool"},
+        "observability": {"key": "observability", "type": "AdvancedNetworkingObservability"},
+        "security": {"key": "security", "type": "AdvancedNetworkingSecurity"},
+    }
+
+    def __init__(
+        self,
+        *,
+        enabled: Optional[bool] = None,
+        observability: Optional["_models.AdvancedNetworkingObservability"] = None,
+        security: Optional["_models.AdvancedNetworkingSecurity"] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword enabled: Indicates the enablement of Advanced Networking functionalities of
+         observability and security on AKS clusters. When this is set to true, all observability and
+         security features will be set to enabled unless explicitly disabled. If not specified, the
+         default is false.
+        :paramtype enabled: bool
+        :keyword observability: Observability profile to enable advanced network metrics and flow
+         logs with historical contexts.
+        :paramtype observability: ~azure.mgmt.containerservice.models.AdvancedNetworkingObservability
+        :keyword security: Security profile to enable security features on cilium based cluster.
+        :paramtype security: ~azure.mgmt.containerservice.models.AdvancedNetworkingSecurity
+        """
+        super().__init__(**kwargs)
+        self.enabled = enabled
+        self.observability = observability
+        self.security = security
+
+
+class AdvancedNetworkingObservability(_serialization.Model):
+    """Observability profile to enable advanced network metrics and flow logs with historical
+    contexts.
+
+    :ivar enabled: Indicates the enablement of Advanced Networking observability functionalities on
+     clusters.
+    :vartype enabled: bool
+    """
+
+    # Single optional flag, serialized under the "enabled" wire key.
+    _attribute_map = {
+        "enabled": {"key": "enabled", "type": "bool"},
+    }
+
+    def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None:
+        """
+        :keyword enabled: Indicates the enablement of Advanced Networking observability
+         functionalities on clusters.
+        :paramtype enabled: bool
+        """
+        super().__init__(**kwargs)
+        self.enabled = enabled
+
+
+class AdvancedNetworkingSecurity(_serialization.Model):
+    """Security profile to enable security features on cilium based cluster.
+
+    :ivar enabled: This feature allows user to configure network policy based on DNS (FQDN) names.
+     It can be enabled only on cilium based clusters. If not specified, the default is false.
+    :vartype enabled: bool
+    :ivar advanced_network_policies: Enable advanced network policies. This allows users to
+     configure Layer 7 network policies (FQDN, HTTP, Kafka). Policies themselves must be configured
+     via the Cilium Network Policy resources, see
+     https://docs.cilium.io/en/latest/security/policy/index.html. This can be enabled only on
+     cilium-based clusters. If not specified, the default value is FQDN if security.enabled is set
+     to true. Known values are: "L7", "FQDN", and "None".
+    :vartype advanced_network_policies: str or
+     ~azure.mgmt.containerservice.models.AdvancedNetworkPolicies
+    """
+
+    # Maps Python attribute names to REST API wire names; the enum-valued
+    # policy field is carried over the wire as a plain string.
+    _attribute_map = {
+        "enabled": {"key": "enabled", "type": "bool"},
+        "advanced_network_policies": {"key": "advancedNetworkPolicies", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        enabled: Optional[bool] = None,
+        advanced_network_policies: Optional[Union[str, "_models.AdvancedNetworkPolicies"]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword enabled: This feature allows user to configure network policy based on DNS (FQDN)
+         names. It can be enabled only on cilium based clusters. If not specified, the default is
+         false.
+        :paramtype enabled: bool
+        :keyword advanced_network_policies: Enable advanced network policies. This allows users to
+         configure Layer 7 network policies (FQDN, HTTP, Kafka). Policies themselves must be
+         configured via the Cilium Network Policy resources, see
+         https://docs.cilium.io/en/latest/security/policy/index.html. This can be enabled only on
+         cilium-based clusters. If not specified, the default value is FQDN if security.enabled is
+         set to true. Known values are: "L7", "FQDN", and "None".
+        :paramtype advanced_network_policies: str or
+         ~azure.mgmt.containerservice.models.AdvancedNetworkPolicies
+        """
+        super().__init__(**kwargs)
+        self.enabled = enabled
+        self.advanced_network_policies = advanced_network_policies
+
+
+class SubResource(_serialization.Model):
+    """Reference to another subresource.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar id: Resource ID.
+    :vartype id: str
+    :ivar name: The name of the resource that is unique within a resource group. This name can be
+     used to access the resource.
+    :vartype name: str
+    :ivar type: Resource type.
+    :vartype type: str
+    """
+
+    # All fields are marked read-only: they are filled in from service
+    # responses during deserialization and are never serialized into requests.
+    _validation = {
+        "id": {"readonly": True},
+        "name": {"readonly": True},
+        "type": {"readonly": True},
+    }
+
+    _attribute_map = {
+        "id": {"key": "id", "type": "str"},
+        "name": {"key": "name", "type": "str"},
+        "type": {"key": "type", "type": "str"},
+    }
+
+    def __init__(self, **kwargs: Any) -> None:
+        """Initialize every server-populated field to ``None``; the
+        deserializer overwrites them from the service response."""
+        super().__init__(**kwargs)
+        self.id: Optional[str] = None
+        self.name: Optional[str] = None
+        self.type: Optional[str] = None
+
+
+class AgentPool(SubResource):
+ """Agent Pool.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar id: Resource ID.
+ :vartype id: str
+ :ivar name: The name of the resource that is unique within a resource group. This name can be
+ used to access the resource.
+ :vartype name: str
+ :ivar type: Resource type.
+ :vartype type: str
+ :ivar e_tag: Unique read-only string used to implement optimistic concurrency. The eTag value
+ will change when the resource is updated. Specify an if-match or if-none-match header with the
+ eTag value for a subsequent request to enable optimistic concurrency per the normal eTag
+ convention.
+ :vartype e_tag: str
+ :ivar count: Number of agents (VMs) to host docker containers. Allowed values must be in the
+ range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for
+ system pools. The default value is 1.
+ :vartype count: int
+ :ivar vm_size: The size of the agent pool VMs. VM size availability varies by region. If a node
+ contains insufficient compute resources (memory, cpu, etc) pods might fail to run correctly.
+ For more details on restricted VM sizes, see:
+ https://docs.microsoft.com/azure/aks/quotas-skus-regions.
+ :vartype vm_size: str
+ :ivar os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine
+ in the master/agent pool. If you specify 0, it will apply the default osDisk size according to
+ the vmSize specified.
+ :vartype os_disk_size_gb: int
+ :ivar os_disk_type: The OS disk type to be used for machines in the agent pool. The default is
+ 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB.
+ Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see
+ `Ephemeral OS `_.
+ Known values are: "Managed" and "Ephemeral".
+ :vartype os_disk_type: str or ~azure.mgmt.containerservice.models.OSDiskType
+ :ivar kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime data
+ root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary".
+ :vartype kubelet_disk_type: str or ~azure.mgmt.containerservice.models.KubeletDiskType
+ :ivar workload_runtime: Determines the type of workload a node can run. Known values are:
+ "OCIContainer", "WasmWasi", and "KataVmIsolation".
+ :vartype workload_runtime: str or ~azure.mgmt.containerservice.models.WorkloadRuntime
+ :ivar message_of_the_day: Message of the day for Linux nodes, base64-encoded. A base64-encoded
+ string which will be written to /etc/motd after decoding. This allows customization of the
+ message of the day for Linux nodes. It must not be specified for Windows nodes. It must be a
+ static string (i.e., will be printed raw and not be executed as a script).
+ :vartype message_of_the_day: str
+ :ivar vnet_subnet_id: The ID of the subnet which agent pool nodes and optionally pods will join
+ on startup. If this is not specified, a VNET and subnet will be generated and used. If no
+ podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes.
+ This is of the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}.
+ :vartype vnet_subnet_id: str
+ :ivar pod_subnet_id: The ID of the subnet which pods will join when launched. If omitted, pod
+ IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of
+ the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}.
+ :vartype pod_subnet_id: str
+ :ivar pod_ip_allocation_mode: Pod IP Allocation Mode. The IP allocation mode for pods in the
+ agent pool. Must be used with podSubnetId. The default is 'DynamicIndividual'. Known values
+ are: "DynamicIndividual" and "StaticBlock".
+ :vartype pod_ip_allocation_mode: str or ~azure.mgmt.containerservice.models.PodIPAllocationMode
+ :ivar max_pods: The maximum number of pods that can run on a node.
+ :vartype max_pods: int
+ :ivar os_type: The operating system type. The default is Linux. Known values are: "Linux" and
+ "Windows".
+ :vartype os_type: str or ~azure.mgmt.containerservice.models.OSType
+ :ivar os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is
+ Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >=
+ 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "AzureLinux3",
+ "CBLMariner", "Windows2019", "Windows2022", "Ubuntu2204", and "Ubuntu2404".
+ :vartype os_sku: str or ~azure.mgmt.containerservice.models.OSSKU
+ :ivar max_count: The maximum number of nodes for auto-scaling.
+ :vartype max_count: int
+ :ivar min_count: The minimum number of nodes for auto-scaling.
+ :vartype min_count: int
+ :ivar enable_auto_scaling: Whether to enable auto-scaler.
+ :vartype enable_auto_scaling: bool
+ :ivar scale_down_mode: The scale down mode to use when scaling the Agent Pool. This also
+ effects the cluster autoscaler behavior. If not specified, it defaults to Delete. Known values
+ are: "Delete" and "Deallocate".
+ :vartype scale_down_mode: str or ~azure.mgmt.containerservice.models.ScaleDownMode
+ :ivar type_properties_type: The type of Agent Pool. Known values are:
+ "VirtualMachineScaleSets", "AvailabilitySet", and "VirtualMachines".
+ :vartype type_properties_type: str or ~azure.mgmt.containerservice.models.AgentPoolType
+ :ivar mode: The mode of an agent pool. A cluster must have at least one 'System' Agent Pool at
+ all times. For additional information on agent pool restrictions and best practices, see:
+ https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System", "User", and
+ "Gateway".
+ :vartype mode: str or ~azure.mgmt.containerservice.models.AgentPoolMode
+ :ivar orchestrator_version: The version of Kubernetes specified by the user. Both patch version
+ (e.g. 1.20.13) and (e.g. 1.20) are supported. When
+ is specified, the latest supported GA patch version is chosen automatically.
+ Updating the cluster with the same once it has been created (e.g. 1.14.x -> 1.14)
+ will not trigger an upgrade, even if a newer patch version is available. As a best practice,
+ you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node
+ pool version must have the same major version as the control plane. The node pool minor version
+ must be within two minor versions of the control plane version. The node pool version cannot be
+ greater than the control plane version. For more information see `upgrading a node pool
+ `_.
+ :vartype orchestrator_version: str
+ :ivar current_orchestrator_version: The version of Kubernetes the Agent Pool is running. If
+ orchestratorVersion is a fully specified version , this field will be
+ exactly equal to it. If orchestratorVersion is , this field will contain the full
+ version being used.
+ :vartype current_orchestrator_version: str
+ :ivar node_image_version: The version of node image.
+ :vartype node_image_version: str
+ :ivar upgrade_settings: Settings for upgrading the agentpool.
+ :vartype upgrade_settings: ~azure.mgmt.containerservice.models.AgentPoolUpgradeSettings
+ :ivar provisioning_state: The current deployment or provisioning state.
+ :vartype provisioning_state: str
+ :ivar power_state: Whether the Agent Pool is running or stopped. When an Agent Pool is first
+ created it is initially Running. The Agent Pool can be stopped by setting this field to
+ Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An
+ Agent Pool can only be stopped if it is Running and provisioning state is Succeeded.
+ :vartype power_state: ~azure.mgmt.containerservice.models.PowerState
+ :ivar availability_zones: The list of Availability zones to use for nodes. This can only be
+ specified if the AgentPoolType property is 'VirtualMachineScaleSets'.
+ :vartype availability_zones: list[str]
+ :ivar enable_node_public_ip: Whether each node is allocated its own public IP. Some scenarios
+ may require nodes in a node pool to receive their own dedicated public IP addresses. A common
+ scenario is for gaming workloads, where a console needs to make a direct connection to a cloud
+ virtual machine to minimize hops. For more information see `assigning a public IP per node
+ `_.
+ The default is false.
+ :vartype enable_node_public_ip: bool
+ :ivar node_public_ip_prefix_id: The public IP prefix ID which VM nodes should use IPs from.
+ This is of the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}.
+ :vartype node_public_ip_prefix_id: str
+ :ivar scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the default
+ is 'Regular'. Known values are: "Spot" and "Regular".
+ :vartype scale_set_priority: str or ~azure.mgmt.containerservice.models.ScaleSetPriority
+ :ivar scale_set_eviction_policy: The Virtual Machine Scale Set eviction policy to use. This
+ cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is
+ 'Delete'. Known values are: "Delete" and "Deallocate".
+ :vartype scale_set_eviction_policy: str or
+ ~azure.mgmt.containerservice.models.ScaleSetEvictionPolicy
+ :ivar spot_max_price: The max price (in US Dollars) you are willing to pay for spot instances.
+ Possible values are any decimal value greater than zero or -1 which indicates default price to
+ be up-to on-demand. Possible values are any decimal value greater than zero or -1 which
+ indicates the willingness to pay any on-demand price. For more details on spot pricing, see
+ `spot VMs pricing `_.
+ :vartype spot_max_price: float
+ :ivar tags: The tags to be persisted on the agent pool virtual machine scale set.
+ :vartype tags: dict[str, str]
+ :ivar node_labels: The node labels to be persisted across all nodes in agent pool.
+ :vartype node_labels: dict[str, str]
+ :ivar node_taints: The taints added to new nodes during node pool create and scale. For
+ example, key=value:NoSchedule.
+ :vartype node_taints: list[str]
+ :ivar proximity_placement_group_id: The ID for Proximity Placement Group.
+ :vartype proximity_placement_group_id: str
+ :ivar kubelet_config: The Kubelet configuration on the agent pool nodes.
+ :vartype kubelet_config: ~azure.mgmt.containerservice.models.KubeletConfig
+ :ivar linux_os_config: The OS configuration of Linux agent nodes.
+ :vartype linux_os_config: ~azure.mgmt.containerservice.models.LinuxOSConfig
+ :ivar enable_encryption_at_host: Whether to enable host based OS and data drive encryption.
+ This is only supported on certain VM sizes and in certain Azure regions. For more information,
+ see: https://docs.microsoft.com/azure/aks/enable-host-encryption.
+ :vartype enable_encryption_at_host: bool
+ :ivar enable_ultra_ssd: Whether to enable UltraSSD.
+ :vartype enable_ultra_ssd: bool
+ :ivar enable_fips: Whether to use a FIPS-enabled OS. See `Add a FIPS-enabled node pool
+ `_
+ for more details.
+ :vartype enable_fips: bool
+ :ivar gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance profile
+ for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and "MIG7g".
+ :vartype gpu_instance_profile: str or ~azure.mgmt.containerservice.models.GPUInstanceProfile
+ :ivar creation_data: CreationData to be used to specify the source Snapshot ID if the node pool
+ will be created/upgraded using a snapshot.
+ :vartype creation_data: ~azure.mgmt.containerservice.models.CreationData
+ :ivar capacity_reservation_group_id: AKS will associate the specified agent pool with the
+ Capacity Reservation Group.
+ :vartype capacity_reservation_group_id: str
+ :ivar host_group_id: The fully qualified resource ID of the Dedicated Host Group to provision
+ virtual machines from, used only in creation scenario and not allowed to changed once set. This
+ is of the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}.
+ For more information see `Azure dedicated hosts
+ `_.
+ :vartype host_group_id: str
+ :ivar network_profile: Network-related settings of an agent pool.
+ :vartype network_profile: ~azure.mgmt.containerservice.models.AgentPoolNetworkProfile
+ :ivar windows_profile: The Windows agent pool's specific profile.
+ :vartype windows_profile: ~azure.mgmt.containerservice.models.AgentPoolWindowsProfile
+ :ivar security_profile: The security settings of an agent pool.
+ :vartype security_profile: ~azure.mgmt.containerservice.models.AgentPoolSecurityProfile
+ :ivar gpu_profile: GPU settings for the Agent Pool.
+ :vartype gpu_profile: ~azure.mgmt.containerservice.models.GPUProfile
+ :ivar gateway_profile: Profile specific to a managed agent pool in Gateway mode. This field
+ cannot be set if agent pool mode is not Gateway.
+ :vartype gateway_profile: ~azure.mgmt.containerservice.models.AgentPoolGatewayProfile
+ :ivar virtual_machines_profile: Specifications on VirtualMachines agent pool.
+ :vartype virtual_machines_profile: ~azure.mgmt.containerservice.models.VirtualMachinesProfile
+ :ivar virtual_machine_nodes_status: The status of nodes in a VirtualMachines agent pool.
+ :vartype virtual_machine_nodes_status:
+ list[~azure.mgmt.containerservice.models.VirtualMachineNodes]
+ :ivar status: Contains read-only information about the Agent Pool.
+ :vartype status: ~azure.mgmt.containerservice.models.AgentPoolStatus
+ :ivar local_dns_profile: Configures the per-node local DNS, with VnetDNS and KubeDNS overrides.
+ LocalDNS helps improve performance and reliability of DNS resolution in an AKS cluster. For
+ more details see aka.ms/aks/localdns.
+ :vartype local_dns_profile: ~azure.mgmt.containerservice.models.LocalDNSProfile
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "e_tag": {"readonly": True},
+ "os_disk_size_gb": {"maximum": 2048, "minimum": 0},
+ "current_orchestrator_version": {"readonly": True},
+ "node_image_version": {"readonly": True},
+ "provisioning_state": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "e_tag": {"key": "properties.eTag", "type": "str"},
+ "count": {"key": "properties.count", "type": "int"},
+ "vm_size": {"key": "properties.vmSize", "type": "str"},
+ "os_disk_size_gb": {"key": "properties.osDiskSizeGB", "type": "int"},
+ "os_disk_type": {"key": "properties.osDiskType", "type": "str"},
+ "kubelet_disk_type": {"key": "properties.kubeletDiskType", "type": "str"},
+ "workload_runtime": {"key": "properties.workloadRuntime", "type": "str"},
+ "message_of_the_day": {"key": "properties.messageOfTheDay", "type": "str"},
+ "vnet_subnet_id": {"key": "properties.vnetSubnetID", "type": "str"},
+ "pod_subnet_id": {"key": "properties.podSubnetID", "type": "str"},
+ "pod_ip_allocation_mode": {"key": "properties.podIPAllocationMode", "type": "str"},
+ "max_pods": {"key": "properties.maxPods", "type": "int"},
+ "os_type": {"key": "properties.osType", "type": "str"},
+ "os_sku": {"key": "properties.osSKU", "type": "str"},
+ "max_count": {"key": "properties.maxCount", "type": "int"},
+ "min_count": {"key": "properties.minCount", "type": "int"},
+ "enable_auto_scaling": {"key": "properties.enableAutoScaling", "type": "bool"},
+ "scale_down_mode": {"key": "properties.scaleDownMode", "type": "str"},
+ "type_properties_type": {"key": "properties.type", "type": "str"},
+ "mode": {"key": "properties.mode", "type": "str"},
+ "orchestrator_version": {"key": "properties.orchestratorVersion", "type": "str"},
+ "current_orchestrator_version": {"key": "properties.currentOrchestratorVersion", "type": "str"},
+ "node_image_version": {"key": "properties.nodeImageVersion", "type": "str"},
+ "upgrade_settings": {"key": "properties.upgradeSettings", "type": "AgentPoolUpgradeSettings"},
+ "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
+ "power_state": {"key": "properties.powerState", "type": "PowerState"},
+ "availability_zones": {"key": "properties.availabilityZones", "type": "[str]"},
+ "enable_node_public_ip": {"key": "properties.enableNodePublicIP", "type": "bool"},
+ "node_public_ip_prefix_id": {"key": "properties.nodePublicIPPrefixID", "type": "str"},
+ "scale_set_priority": {"key": "properties.scaleSetPriority", "type": "str"},
+ "scale_set_eviction_policy": {"key": "properties.scaleSetEvictionPolicy", "type": "str"},
+ "spot_max_price": {"key": "properties.spotMaxPrice", "type": "float"},
+ "tags": {"key": "properties.tags", "type": "{str}"},
+ "node_labels": {"key": "properties.nodeLabels", "type": "{str}"},
+ "node_taints": {"key": "properties.nodeTaints", "type": "[str]"},
+ "proximity_placement_group_id": {"key": "properties.proximityPlacementGroupID", "type": "str"},
+ "kubelet_config": {"key": "properties.kubeletConfig", "type": "KubeletConfig"},
+ "linux_os_config": {"key": "properties.linuxOSConfig", "type": "LinuxOSConfig"},
+ "enable_encryption_at_host": {"key": "properties.enableEncryptionAtHost", "type": "bool"},
+ "enable_ultra_ssd": {"key": "properties.enableUltraSSD", "type": "bool"},
+ "enable_fips": {"key": "properties.enableFIPS", "type": "bool"},
+ "gpu_instance_profile": {"key": "properties.gpuInstanceProfile", "type": "str"},
+ "creation_data": {"key": "properties.creationData", "type": "CreationData"},
+ "capacity_reservation_group_id": {"key": "properties.capacityReservationGroupID", "type": "str"},
+ "host_group_id": {"key": "properties.hostGroupID", "type": "str"},
+ "network_profile": {"key": "properties.networkProfile", "type": "AgentPoolNetworkProfile"},
+ "windows_profile": {"key": "properties.windowsProfile", "type": "AgentPoolWindowsProfile"},
+ "security_profile": {"key": "properties.securityProfile", "type": "AgentPoolSecurityProfile"},
+ "gpu_profile": {"key": "properties.gpuProfile", "type": "GPUProfile"},
+ "gateway_profile": {"key": "properties.gatewayProfile", "type": "AgentPoolGatewayProfile"},
+ "virtual_machines_profile": {"key": "properties.virtualMachinesProfile", "type": "VirtualMachinesProfile"},
+ "virtual_machine_nodes_status": {
+ "key": "properties.virtualMachineNodesStatus",
+ "type": "[VirtualMachineNodes]",
+ },
+ "status": {"key": "properties.status", "type": "AgentPoolStatus"},
+ "local_dns_profile": {"key": "properties.localDNSProfile", "type": "LocalDNSProfile"},
+ }
+
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ count: Optional[int] = None,
+ vm_size: Optional[str] = None,
+ os_disk_size_gb: Optional[int] = None,
+ os_disk_type: Optional[Union[str, "_models.OSDiskType"]] = None,
+ kubelet_disk_type: Optional[Union[str, "_models.KubeletDiskType"]] = None,
+ workload_runtime: Optional[Union[str, "_models.WorkloadRuntime"]] = None,
+ message_of_the_day: Optional[str] = None,
+ vnet_subnet_id: Optional[str] = None,
+ pod_subnet_id: Optional[str] = None,
+ pod_ip_allocation_mode: Optional[Union[str, "_models.PodIPAllocationMode"]] = None,
+ max_pods: Optional[int] = None,
+ os_type: Union[str, "_models.OSType"] = "Linux",
+ os_sku: Optional[Union[str, "_models.OSSKU"]] = None,
+ max_count: Optional[int] = None,
+ min_count: Optional[int] = None,
+ enable_auto_scaling: Optional[bool] = None,
+ scale_down_mode: Optional[Union[str, "_models.ScaleDownMode"]] = None,
+ type_properties_type: Optional[Union[str, "_models.AgentPoolType"]] = None,
+ mode: Optional[Union[str, "_models.AgentPoolMode"]] = None,
+ orchestrator_version: Optional[str] = None,
+ upgrade_settings: Optional["_models.AgentPoolUpgradeSettings"] = None,
+ power_state: Optional["_models.PowerState"] = None,
+ availability_zones: Optional[list[str]] = None,
+ enable_node_public_ip: Optional[bool] = None,
+ node_public_ip_prefix_id: Optional[str] = None,
+ scale_set_priority: Union[str, "_models.ScaleSetPriority"] = "Regular",
+ scale_set_eviction_policy: Union[str, "_models.ScaleSetEvictionPolicy"] = "Delete",
+ spot_max_price: float = -1,
+ tags: Optional[dict[str, str]] = None,
+ node_labels: Optional[dict[str, str]] = None,
+ node_taints: Optional[list[str]] = None,
+ proximity_placement_group_id: Optional[str] = None,
+ kubelet_config: Optional["_models.KubeletConfig"] = None,
+ linux_os_config: Optional["_models.LinuxOSConfig"] = None,
+ enable_encryption_at_host: Optional[bool] = None,
+ enable_ultra_ssd: Optional[bool] = None,
+ enable_fips: Optional[bool] = None,
+ gpu_instance_profile: Optional[Union[str, "_models.GPUInstanceProfile"]] = None,
+ creation_data: Optional["_models.CreationData"] = None,
+ capacity_reservation_group_id: Optional[str] = None,
+ host_group_id: Optional[str] = None,
+ network_profile: Optional["_models.AgentPoolNetworkProfile"] = None,
+ windows_profile: Optional["_models.AgentPoolWindowsProfile"] = None,
+ security_profile: Optional["_models.AgentPoolSecurityProfile"] = None,
+ gpu_profile: Optional["_models.GPUProfile"] = None,
+ gateway_profile: Optional["_models.AgentPoolGatewayProfile"] = None,
+ virtual_machines_profile: Optional["_models.VirtualMachinesProfile"] = None,
+ virtual_machine_nodes_status: Optional[list["_models.VirtualMachineNodes"]] = None,
+ status: Optional["_models.AgentPoolStatus"] = None,
+ local_dns_profile: Optional["_models.LocalDNSProfile"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword count: Number of agents (VMs) to host docker containers. Allowed values must be in the
+ range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for
+ system pools. The default value is 1.
+ :paramtype count: int
+ :keyword vm_size: The size of the agent pool VMs. VM size availability varies by region. If a
+ node contains insufficient compute resources (memory, cpu, etc) pods might fail to run
+ correctly. For more details on restricted VM sizes, see:
+ https://docs.microsoft.com/azure/aks/quotas-skus-regions.
+ :paramtype vm_size: str
+ :keyword os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every
+ machine in the master/agent pool. If you specify 0, it will apply the default osDisk size
+ according to the vmSize specified.
+ :paramtype os_disk_size_gb: int
+ :keyword os_disk_type: The OS disk type to be used for machines in the agent pool. The default
+ is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested
+ OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more
+ information see `Ephemeral OS
+ `_. Known values are:
+ "Managed" and "Ephemeral".
+ :paramtype os_disk_type: str or ~azure.mgmt.containerservice.models.OSDiskType
+ :keyword kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime
+ data root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary".
+ :paramtype kubelet_disk_type: str or ~azure.mgmt.containerservice.models.KubeletDiskType
+ :keyword workload_runtime: Determines the type of workload a node can run. Known values are:
+ "OCIContainer", "WasmWasi", and "KataVmIsolation".
+ :paramtype workload_runtime: str or ~azure.mgmt.containerservice.models.WorkloadRuntime
+ :keyword message_of_the_day: Message of the day for Linux nodes, base64-encoded. A
+ base64-encoded string which will be written to /etc/motd after decoding. This allows
+ customization of the message of the day for Linux nodes. It must not be specified for Windows
+ nodes. It must be a static string (i.e., will be printed raw and not be executed as a script).
+ :paramtype message_of_the_day: str
+ :keyword vnet_subnet_id: The ID of the subnet which agent pool nodes and optionally pods will
+ join on startup. If this is not specified, a VNET and subnet will be generated and used. If no
+ podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes.
+ This is of the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}.
+ :paramtype vnet_subnet_id: str
+ :keyword pod_subnet_id: The ID of the subnet which pods will join when launched. If omitted,
+ pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is
+ of the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}.
+ :paramtype pod_subnet_id: str
+ :keyword pod_ip_allocation_mode: Pod IP Allocation Mode. The IP allocation mode for pods in the
+ agent pool. Must be used with podSubnetId. The default is 'DynamicIndividual'. Known values
+ are: "DynamicIndividual" and "StaticBlock".
+ :paramtype pod_ip_allocation_mode: str or
+ ~azure.mgmt.containerservice.models.PodIPAllocationMode
+ :keyword max_pods: The maximum number of pods that can run on a node.
+ :paramtype max_pods: int
+ :keyword os_type: The operating system type. The default is Linux. Known values are: "Linux"
+ and "Windows".
+ :paramtype os_type: str or ~azure.mgmt.containerservice.models.OSType
+ :keyword os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType
+ is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >=
+ 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "AzureLinux3",
+ "CBLMariner", "Windows2019", "Windows2022", "Ubuntu2204", and "Ubuntu2404".
+ :paramtype os_sku: str or ~azure.mgmt.containerservice.models.OSSKU
+ :keyword max_count: The maximum number of nodes for auto-scaling.
+ :paramtype max_count: int
+ :keyword min_count: The minimum number of nodes for auto-scaling.
+ :paramtype min_count: int
+ :keyword enable_auto_scaling: Whether to enable auto-scaler.
+ :paramtype enable_auto_scaling: bool
+ :keyword scale_down_mode: The scale down mode to use when scaling the Agent Pool. This also
+ effects the cluster autoscaler behavior. If not specified, it defaults to Delete. Known values
+ are: "Delete" and "Deallocate".
+ :paramtype scale_down_mode: str or ~azure.mgmt.containerservice.models.ScaleDownMode
+ :keyword type_properties_type: The type of Agent Pool. Known values are:
+ "VirtualMachineScaleSets", "AvailabilitySet", and "VirtualMachines".
+ :paramtype type_properties_type: str or ~azure.mgmt.containerservice.models.AgentPoolType
+ :keyword mode: The mode of an agent pool. A cluster must have at least one 'System' Agent Pool
+ at all times. For additional information on agent pool restrictions and best practices, see:
+ https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System", "User", and
+ "Gateway".
+ :paramtype mode: str or ~azure.mgmt.containerservice.models.AgentPoolMode
+ :keyword orchestrator_version: The version of Kubernetes specified by the user. Both patch
+ version (e.g. 1.20.13) and (e.g. 1.20) are supported. When
+ is specified, the latest supported GA patch version is chosen automatically.
+ Updating the cluster with the same once it has been created (e.g. 1.14.x -> 1.14)
+ will not trigger an upgrade, even if a newer patch version is available. As a best practice,
+ you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node
+ pool version must have the same major version as the control plane. The node pool minor version
+ must be within two minor versions of the control plane version. The node pool version cannot be
+ greater than the control plane version. For more information see `upgrading a node pool
+ `_.
+ :paramtype orchestrator_version: str
+ :keyword upgrade_settings: Settings for upgrading the agentpool.
+ :paramtype upgrade_settings: ~azure.mgmt.containerservice.models.AgentPoolUpgradeSettings
+ :keyword power_state: Whether the Agent Pool is running or stopped. When an Agent Pool is first
+ created it is initially Running. The Agent Pool can be stopped by setting this field to
+ Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An
+ Agent Pool can only be stopped if it is Running and provisioning state is Succeeded.
+ :paramtype power_state: ~azure.mgmt.containerservice.models.PowerState
+ :keyword availability_zones: The list of Availability zones to use for nodes. This can only be
+ specified if the AgentPoolType property is 'VirtualMachineScaleSets'.
+ :paramtype availability_zones: list[str]
+ :keyword enable_node_public_ip: Whether each node is allocated its own public IP. Some
+ scenarios may require nodes in a node pool to receive their own dedicated public IP addresses.
+ A common scenario is for gaming workloads, where a console needs to make a direct connection to
+ a cloud virtual machine to minimize hops. For more information see `assigning a public IP per
+ node
+ `_.
+ The default is false.
+ :paramtype enable_node_public_ip: bool
+ :keyword node_public_ip_prefix_id: The public IP prefix ID which VM nodes should use IPs from.
+ This is of the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}.
+ :paramtype node_public_ip_prefix_id: str
+ :keyword scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the
+ default is 'Regular'. Known values are: "Spot" and "Regular".
+ :paramtype scale_set_priority: str or ~azure.mgmt.containerservice.models.ScaleSetPriority
+ :keyword scale_set_eviction_policy: The Virtual Machine Scale Set eviction policy to use. This
+ cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is
+ 'Delete'. Known values are: "Delete" and "Deallocate".
+ :paramtype scale_set_eviction_policy: str or
+ ~azure.mgmt.containerservice.models.ScaleSetEvictionPolicy
+ :keyword spot_max_price: The max price (in US Dollars) you are willing to pay for spot
+ instances. Possible values are any decimal value greater than zero or -1 which indicates
+ default price to be up-to on-demand. Possible values are any decimal value greater than zero or
+ -1 which indicates the willingness to pay any on-demand price. For more details on spot
+ pricing, see `spot VMs pricing
+ `_.
+ :paramtype spot_max_price: float
+ :keyword tags: The tags to be persisted on the agent pool virtual machine scale set.
+ :paramtype tags: dict[str, str]
+ :keyword node_labels: The node labels to be persisted across all nodes in agent pool.
+ :paramtype node_labels: dict[str, str]
+ :keyword node_taints: The taints added to new nodes during node pool create and scale. For
+ example, key=value:NoSchedule.
+ :paramtype node_taints: list[str]
+ :keyword proximity_placement_group_id: The ID for Proximity Placement Group.
+ :paramtype proximity_placement_group_id: str
+ :keyword kubelet_config: The Kubelet configuration on the agent pool nodes.
+ :paramtype kubelet_config: ~azure.mgmt.containerservice.models.KubeletConfig
+ :keyword linux_os_config: The OS configuration of Linux agent nodes.
+ :paramtype linux_os_config: ~azure.mgmt.containerservice.models.LinuxOSConfig
+ :keyword enable_encryption_at_host: Whether to enable host based OS and data drive encryption.
+ This is only supported on certain VM sizes and in certain Azure regions. For more information,
+ see: https://docs.microsoft.com/azure/aks/enable-host-encryption.
+ :paramtype enable_encryption_at_host: bool
+ :keyword enable_ultra_ssd: Whether to enable UltraSSD.
+ :paramtype enable_ultra_ssd: bool
+ :keyword enable_fips: Whether to use a FIPS-enabled OS. See `Add a FIPS-enabled node pool
+ `_
+ for more details.
+ :paramtype enable_fips: bool
+ :keyword gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance
+ profile for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and
+ "MIG7g".
+ :paramtype gpu_instance_profile: str or ~azure.mgmt.containerservice.models.GPUInstanceProfile
+ :keyword creation_data: CreationData to be used to specify the source Snapshot ID if the node
+ pool will be created/upgraded using a snapshot.
+ :paramtype creation_data: ~azure.mgmt.containerservice.models.CreationData
+ :keyword capacity_reservation_group_id: AKS will associate the specified agent pool with the
+ Capacity Reservation Group.
+ :paramtype capacity_reservation_group_id: str
+ :keyword host_group_id: The fully qualified resource ID of the Dedicated Host Group to
+ provision virtual machines from, used only in creation scenario and not allowed to changed once
+ set. This is of the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}.
+ For more information see `Azure dedicated hosts
+ `_.
+ :paramtype host_group_id: str
+ :keyword network_profile: Network-related settings of an agent pool.
+ :paramtype network_profile: ~azure.mgmt.containerservice.models.AgentPoolNetworkProfile
+ :keyword windows_profile: The Windows agent pool's specific profile.
+ :paramtype windows_profile: ~azure.mgmt.containerservice.models.AgentPoolWindowsProfile
+ :keyword security_profile: The security settings of an agent pool.
+ :paramtype security_profile: ~azure.mgmt.containerservice.models.AgentPoolSecurityProfile
+ :keyword gpu_profile: GPU settings for the Agent Pool.
+ :paramtype gpu_profile: ~azure.mgmt.containerservice.models.GPUProfile
+ :keyword gateway_profile: Profile specific to a managed agent pool in Gateway mode. This field
+ cannot be set if agent pool mode is not Gateway.
+ :paramtype gateway_profile: ~azure.mgmt.containerservice.models.AgentPoolGatewayProfile
+ :keyword virtual_machines_profile: Specifications on VirtualMachines agent pool.
+ :paramtype virtual_machines_profile: ~azure.mgmt.containerservice.models.VirtualMachinesProfile
+ :keyword virtual_machine_nodes_status: The status of nodes in a VirtualMachines agent pool.
+ :paramtype virtual_machine_nodes_status:
+ list[~azure.mgmt.containerservice.models.VirtualMachineNodes]
+ :keyword status: Contains read-only information about the Agent Pool.
+ :paramtype status: ~azure.mgmt.containerservice.models.AgentPoolStatus
+ :keyword local_dns_profile: Configures the per-node local DNS, with VnetDNS and KubeDNS
+ overrides. LocalDNS helps improve performance and reliability of DNS resolution in an AKS
+ cluster. For more details see aka.ms/aks/localdns.
+ :paramtype local_dns_profile: ~azure.mgmt.containerservice.models.LocalDNSProfile
+ """
+ super().__init__(**kwargs)
+ self.e_tag: Optional[str] = None
+ self.count = count
+ self.vm_size = vm_size
+ self.os_disk_size_gb = os_disk_size_gb
+ self.os_disk_type = os_disk_type
+ self.kubelet_disk_type = kubelet_disk_type
+ self.workload_runtime = workload_runtime
+ self.message_of_the_day = message_of_the_day
+ self.vnet_subnet_id = vnet_subnet_id
+ self.pod_subnet_id = pod_subnet_id
+ self.pod_ip_allocation_mode = pod_ip_allocation_mode
+ self.max_pods = max_pods
+ self.os_type = os_type
+ self.os_sku = os_sku
+ self.max_count = max_count
+ self.min_count = min_count
+ self.enable_auto_scaling = enable_auto_scaling
+ self.scale_down_mode = scale_down_mode
+ self.type_properties_type = type_properties_type
+ self.mode = mode
+ self.orchestrator_version = orchestrator_version
+ self.current_orchestrator_version: Optional[str] = None
+ self.node_image_version: Optional[str] = None
+ self.upgrade_settings = upgrade_settings
+ self.provisioning_state: Optional[str] = None
+ self.power_state = power_state
+ self.availability_zones = availability_zones
+ self.enable_node_public_ip = enable_node_public_ip
+ self.node_public_ip_prefix_id = node_public_ip_prefix_id
+ self.scale_set_priority = scale_set_priority
+ self.scale_set_eviction_policy = scale_set_eviction_policy
+ self.spot_max_price = spot_max_price
+ self.tags = tags
+ self.node_labels = node_labels
+ self.node_taints = node_taints
+ self.proximity_placement_group_id = proximity_placement_group_id
+ self.kubelet_config = kubelet_config
+ self.linux_os_config = linux_os_config
+ self.enable_encryption_at_host = enable_encryption_at_host
+ self.enable_ultra_ssd = enable_ultra_ssd
+ self.enable_fips = enable_fips
+ self.gpu_instance_profile = gpu_instance_profile
+ self.creation_data = creation_data
+ self.capacity_reservation_group_id = capacity_reservation_group_id
+ self.host_group_id = host_group_id
+ self.network_profile = network_profile
+ self.windows_profile = windows_profile
+ self.security_profile = security_profile
+ self.gpu_profile = gpu_profile
+ self.gateway_profile = gateway_profile
+ self.virtual_machines_profile = virtual_machines_profile
+ self.virtual_machine_nodes_status = virtual_machine_nodes_status
+ self.status = status
+ self.local_dns_profile = local_dns_profile
+
+
class AgentPoolAvailableVersions(_serialization.Model):
    """Describes the versions that are currently available for an agent pool.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The ID of the agent pool version list.
    :vartype id: str
    :ivar name: The name of the agent pool version list.
    :vartype name: str
    :ivar type: Type of the agent pool version list.
    :vartype type: str
    :ivar agent_pool_versions: List of versions available for agent pool.
    :vartype agent_pool_versions:
     list[~azure.mgmt.containerservice.models.AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem]
    """

    # id/name/type come back from the service and are rejected on outgoing payloads.
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    # Wire-format keys; agentPoolVersions lives under the ARM "properties" envelope.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "agent_pool_versions": {
            "key": "properties.agentPoolVersions",
            "type": "[AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem]",
        },
    }

    def __init__(
        self,
        *,
        agent_pool_versions: Optional[list["_models.AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword agent_pool_versions: List of versions available for agent pool.
        :paramtype agent_pool_versions:
         list[~azure.mgmt.containerservice.models.AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem]
        """
        super().__init__(**kwargs)
        self.agent_pool_versions = agent_pool_versions
        # Read-only, server-populated fields start out unset on the client.
        self.id: Optional[str] = None
        self.name: Optional[str] = None
        self.type: Optional[str] = None
+
+
class AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem(_serialization.Model):  # pylint: disable=name-too-long
    """A single entry in the list of available agent pool versions.

    :ivar default: Whether this version is the default agent pool version.
    :vartype default: bool
    :ivar kubernetes_version: The Kubernetes version (major.minor.patch).
    :vartype kubernetes_version: str
    :ivar is_preview: Whether Kubernetes version is currently in preview.
    :vartype is_preview: bool
    """

    _attribute_map = {
        "default": {"key": "default", "type": "bool"},
        "kubernetes_version": {"key": "kubernetesVersion", "type": "str"},
        "is_preview": {"key": "isPreview", "type": "bool"},
    }

    def __init__(
        self,
        *,
        default: Optional[bool] = None,
        kubernetes_version: Optional[str] = None,
        is_preview: Optional[bool] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword default: Whether this version is the default agent pool version.
        :paramtype default: bool
        :keyword kubernetes_version: The Kubernetes version (major.minor.patch).
        :paramtype kubernetes_version: str
        :keyword is_preview: Whether Kubernetes version is currently in preview.
        :paramtype is_preview: bool
        """
        super().__init__(**kwargs)
        # Independent fields; assignment order is not significant.
        self.kubernetes_version = kubernetes_version
        self.is_preview = is_preview
        self.default = default
+
+
class AgentPoolDeleteMachinesParameter(_serialization.Model):
    """Request body naming the machines to remove from an agent pool.

    All required parameters must be populated in order to send to server.

    :ivar machine_names: The agent pool machine names. Required.
    :vartype machine_names: list[str]
    """

    # machine_names is mandatory; serialization fails without it.
    _validation = {
        "machine_names": {"required": True},
    }

    _attribute_map = {
        "machine_names": {"key": "machineNames", "type": "[str]"},
    }

    def __init__(self, *, machine_names: list[str], **kwargs: Any) -> None:
        """
        :keyword machine_names: The agent pool machine names. Required.
        :paramtype machine_names: list[str]
        """
        super().__init__(**kwargs)
        self.machine_names = machine_names
+
+
class AgentPoolGatewayProfile(_serialization.Model):
    """Settings specific to a managed cluster agent pool running in Gateway mode.

    :ivar public_ip_prefix_size: The Gateway agent pool associates one public IPPrefix for each
     static egress gateway to provide public egress. The size of Public IPPrefix should be selected
     by the user. Each node in the agent pool is assigned with one IP from the IPPrefix. The
     IPPrefix size thus serves as a cap on the size of the Gateway agent pool. Due to Azure public
     IPPrefix size limitation, the valid value range is [28, 31] (/31 = 2 nodes/IPs, /30 = 4
     nodes/IPs, /29 = 8 nodes/IPs, /28 = 16 nodes/IPs). The default value is 31.
    :vartype public_ip_prefix_size: int
    """

    # Azure public IPPrefix sizes are limited to /28 through /31.
    _validation = {
        "public_ip_prefix_size": {"maximum": 31, "minimum": 28},
    }

    _attribute_map = {
        "public_ip_prefix_size": {"key": "publicIPPrefixSize", "type": "int"},
    }

    def __init__(self, *, public_ip_prefix_size: int = 31, **kwargs: Any) -> None:
        """
        :keyword public_ip_prefix_size: The Gateway agent pool associates one public IPPrefix for
         each static egress gateway to provide public egress. Each node in the agent pool is assigned
         one IP from the IPPrefix, so the prefix size caps the pool size. Valid range is [28, 31]
         (/31 = 2 nodes/IPs, /30 = 4 nodes/IPs, /29 = 8 nodes/IPs, /28 = 16 nodes/IPs). The default
         value is 31.
        :paramtype public_ip_prefix_size: int
        """
        super().__init__(**kwargs)
        self.public_ip_prefix_size = public_ip_prefix_size
+
+
class AgentPoolListResult(_serialization.Model):
    """One page of results returned by the List Agent Pools operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: The list of agent pools.
    :vartype value: list[~azure.mgmt.containerservice.models.AgentPool]
    :ivar next_link: The URL to get the next set of agent pool results.
    :vartype next_link: str
    """

    # The continuation link is produced by the service only.
    _validation = {
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[AgentPool]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: Optional[list["_models.AgentPool"]] = None, **kwargs: Any) -> None:
        """
        :keyword value: The list of agent pools.
        :paramtype value: list[~azure.mgmt.containerservice.models.AgentPool]
        """
        super().__init__(**kwargs)
        # next_link is read-only; it is filled in when a response is deserialized.
        self.next_link: Optional[str] = None
        self.value = value
+
+
class AgentPoolNetworkProfile(_serialization.Model):
    """Networking configuration that applies to the nodes of an agent pool.

    :ivar node_public_ip_tags: IPTags of instance-level public IPs.
    :vartype node_public_ip_tags: list[~azure.mgmt.containerservice.models.IPTag]
    :ivar allowed_host_ports: The port ranges that are allowed to access. The specified ranges are
     allowed to overlap.
    :vartype allowed_host_ports: list[~azure.mgmt.containerservice.models.PortRange]
    :ivar application_security_groups: The IDs of the application security groups which agent pool
     will associate when created.
    :vartype application_security_groups: list[str]
    """

    _attribute_map = {
        "node_public_ip_tags": {"key": "nodePublicIPTags", "type": "[IPTag]"},
        "allowed_host_ports": {"key": "allowedHostPorts", "type": "[PortRange]"},
        "application_security_groups": {"key": "applicationSecurityGroups", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        node_public_ip_tags: Optional[list["_models.IPTag"]] = None,
        allowed_host_ports: Optional[list["_models.PortRange"]] = None,
        application_security_groups: Optional[list[str]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword node_public_ip_tags: IPTags of instance-level public IPs.
        :paramtype node_public_ip_tags: list[~azure.mgmt.containerservice.models.IPTag]
        :keyword allowed_host_ports: The port ranges that are allowed to access. The specified
         ranges are allowed to overlap.
        :paramtype allowed_host_ports: list[~azure.mgmt.containerservice.models.PortRange]
        :keyword application_security_groups: The IDs of the application security groups which
         agent pool will associate when created.
        :paramtype application_security_groups: list[str]
        """
        super().__init__(**kwargs)
        # Straight pass-through of the three optional settings.
        self.application_security_groups = application_security_groups
        self.allowed_host_ports = allowed_host_ports
        self.node_public_ip_tags = node_public_ip_tags
+
+
class AgentPoolSecurityProfile(_serialization.Model):
    """Security-related options applied to the nodes of an agent pool.

    :ivar enable_vtpm: vTPM is a Trusted Launch feature for configuring a dedicated secure vault
     for keys and measurements held locally on the node. For more details, see
     aka.ms/aks/trustedlaunch. If not specified, the default is false.
    :vartype enable_vtpm: bool
    :ivar enable_secure_boot: Secure Boot is a feature of Trusted Launch which ensures that only
     signed operating systems and drivers can boot. For more details, see aka.ms/aks/trustedlaunch.
     If not specified, the default is false.
    :vartype enable_secure_boot: bool
    :ivar ssh_access: SSH access method of an agent pool. Known values are: "LocalUser" and
     "Disabled".
    :vartype ssh_access: str or ~azure.mgmt.containerservice.models.AgentPoolSSHAccess
    """

    _attribute_map = {
        "enable_vtpm": {"key": "enableVTPM", "type": "bool"},
        "enable_secure_boot": {"key": "enableSecureBoot", "type": "bool"},
        "ssh_access": {"key": "sshAccess", "type": "str"},
    }

    def __init__(
        self,
        *,
        enable_vtpm: Optional[bool] = None,
        enable_secure_boot: Optional[bool] = None,
        ssh_access: Optional[Union[str, "_models.AgentPoolSSHAccess"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword enable_vtpm: vTPM is a Trusted Launch feature for configuring a dedicated secure
         vault for keys and measurements held locally on the node. For more details, see
         aka.ms/aks/trustedlaunch. If not specified, the default is false.
        :paramtype enable_vtpm: bool
        :keyword enable_secure_boot: Secure Boot is a feature of Trusted Launch which ensures that
         only signed operating systems and drivers can boot. For more details, see
         aka.ms/aks/trustedlaunch. If not specified, the default is false.
        :paramtype enable_secure_boot: bool
        :keyword ssh_access: SSH access method of an agent pool. Known values are: "LocalUser" and
         "Disabled".
        :paramtype ssh_access: str or ~azure.mgmt.containerservice.models.AgentPoolSSHAccess
        """
        super().__init__(**kwargs)
        # None means "unspecified"; the service applies its own defaults.
        self.ssh_access = ssh_access
        self.enable_secure_boot = enable_secure_boot
        self.enable_vtpm = enable_vtpm
+
+
class AgentPoolStatus(_serialization.Model):
    """Read-only status reported by the service for an Agent Pool.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar provisioning_error: The error detail information of the agent pool. Preserves the
     detailed info of failure. If there was no error, this field is omitted.
    :vartype provisioning_error: ~azure.mgmt.containerservice.models.ErrorDetail
    """

    # Entirely server-populated; clients never send this field.
    _validation = {
        "provisioning_error": {"readonly": True},
    }

    _attribute_map = {
        "provisioning_error": {"key": "provisioningError", "type": "ErrorDetail"},
    }

    def __init__(self, **kwargs: Any) -> None:
        """No settable properties; everything on this model is read-only."""
        super().__init__(**kwargs)
        self.provisioning_error: Optional["_models.ErrorDetail"] = None
+
+
class AgentPoolUpgradeProfile(_serialization.Model):
    """Describes the upgrades that an agent pool can move to.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to server.

    :ivar id: The ID of the agent pool upgrade profile.
    :vartype id: str
    :ivar name: The name of the agent pool upgrade profile.
    :vartype name: str
    :ivar type: The type of the agent pool upgrade profile.
    :vartype type: str
    :ivar kubernetes_version: The Kubernetes version (major.minor.patch). Required.
    :vartype kubernetes_version: str
    :ivar os_type: The operating system type. The default is Linux. Known values are: "Linux" and
     "Windows".
    :vartype os_type: str or ~azure.mgmt.containerservice.models.OSType
    :ivar upgrades: List of orchestrator types and versions available for upgrade.
    :vartype upgrades:
     list[~azure.mgmt.containerservice.models.AgentPoolUpgradeProfilePropertiesUpgradesItem]
    :ivar latest_node_image_version: The latest AKS supported node image version.
    :vartype latest_node_image_version: str
    """

    # ARM envelope fields are read-only; version and OS type must always be present.
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "kubernetes_version": {"required": True},
        "os_type": {"required": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "kubernetes_version": {"key": "properties.kubernetesVersion", "type": "str"},
        "os_type": {"key": "properties.osType", "type": "str"},
        "upgrades": {"key": "properties.upgrades", "type": "[AgentPoolUpgradeProfilePropertiesUpgradesItem]"},
        "latest_node_image_version": {"key": "properties.latestNodeImageVersion", "type": "str"},
    }

    def __init__(
        self,
        *,
        kubernetes_version: str,
        os_type: Union[str, "_models.OSType"] = "Linux",
        upgrades: Optional[list["_models.AgentPoolUpgradeProfilePropertiesUpgradesItem"]] = None,
        latest_node_image_version: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword kubernetes_version: The Kubernetes version (major.minor.patch). Required.
        :paramtype kubernetes_version: str
        :keyword os_type: The operating system type. The default is Linux. Known values are:
         "Linux" and "Windows".
        :paramtype os_type: str or ~azure.mgmt.containerservice.models.OSType
        :keyword upgrades: List of orchestrator types and versions available for upgrade.
        :paramtype upgrades:
         list[~azure.mgmt.containerservice.models.AgentPoolUpgradeProfilePropertiesUpgradesItem]
        :keyword latest_node_image_version: The latest AKS supported node image version.
        :paramtype latest_node_image_version: str
        """
        super().__init__(**kwargs)
        # Caller-supplied properties.
        self.kubernetes_version = kubernetes_version
        self.os_type = os_type
        self.upgrades = upgrades
        self.latest_node_image_version = latest_node_image_version
        # Read-only ARM envelope fields, filled in on deserialization.
        self.id: Optional[str] = None
        self.name: Optional[str] = None
        self.type: Optional[str] = None
+
+
class AgentPoolUpgradeProfilePropertiesUpgradesItem(_serialization.Model):  # pylint: disable=name-too-long
    """One candidate version an agent pool can upgrade to.

    :ivar kubernetes_version: The Kubernetes version (major.minor.patch).
    :vartype kubernetes_version: str
    :ivar is_preview: Whether the Kubernetes version is currently in preview.
    :vartype is_preview: bool
    """

    _attribute_map = {
        "kubernetes_version": {"key": "kubernetesVersion", "type": "str"},
        "is_preview": {"key": "isPreview", "type": "bool"},
    }

    def __init__(
        self,
        *,
        kubernetes_version: Optional[str] = None,
        is_preview: Optional[bool] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword kubernetes_version: The Kubernetes version (major.minor.patch).
        :paramtype kubernetes_version: str
        :keyword is_preview: Whether the Kubernetes version is currently in preview.
        :paramtype is_preview: bool
        """
        super().__init__(**kwargs)
        self.is_preview = is_preview
        self.kubernetes_version = kubernetes_version
+
+
class AgentPoolUpgradeSettings(_serialization.Model):
    """Controls how nodes in an agentpool are rotated during an upgrade.

    :ivar max_surge: The maximum number or percentage of nodes that are surged during upgrade. This
     can either be set to an integer (e.g. '5') or a percentage (e.g. '50%'). If a percentage is
     specified, it is the percentage of the total agent pool size at the time of the upgrade. For
     percentages, fractional nodes are rounded up. If not specified, the default is 10%. For more
     information, including best practices, see:
     https://learn.microsoft.com/en-us/azure/aks/upgrade-cluster.
    :vartype max_surge: str
    :ivar max_unavailable: The maximum number or percentage of nodes that can be simultaneously
     unavailable during upgrade. This can either be set to an integer (e.g. '1') or a percentage
     (e.g. '5%'). If a percentage is specified, it is the percentage of the total agent pool size at
     the time of the upgrade. For percentages, fractional nodes are rounded up. If not specified,
     the default is 0. For more information, including best practices, see:
     https://learn.microsoft.com/en-us/azure/aks/upgrade-cluster.
    :vartype max_unavailable: str
    :ivar drain_timeout_in_minutes: The drain timeout for a node. The amount of time (in minutes)
     to wait on eviction of pods and graceful termination per node. This eviction wait time honors
     waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. If not
     specified, the default is 30 minutes.
    :vartype drain_timeout_in_minutes: int
    :ivar node_soak_duration_in_minutes: The soak duration for a node. The amount of time (in
     minutes) to wait after draining a node and before reimaging it and moving on to next node. If
     not specified, the default is 0 minutes.
    :vartype node_soak_duration_in_minutes: int
    :ivar undrainable_node_behavior: Defines the behavior for undrainable nodes during upgrade. The
     most common cause of undrainable nodes is Pod Disruption Budgets (PDBs), but other issues, such
     as pod termination grace period is exceeding the remaining per-node drain timeout or pod is
     still being in a running state, can also cause undrainable nodes. Known values are: "Cordon"
     and "Schedule".
    :vartype undrainable_node_behavior: str or
     ~azure.mgmt.containerservice.models.UndrainableNodeBehavior
    """

    # Service-enforced bounds: drain timeout 1-1440 minutes, soak 0-30 minutes.
    _validation = {
        "drain_timeout_in_minutes": {"maximum": 1440, "minimum": 1},
        "node_soak_duration_in_minutes": {"maximum": 30, "minimum": 0},
    }

    _attribute_map = {
        "max_surge": {"key": "maxSurge", "type": "str"},
        "max_unavailable": {"key": "maxUnavailable", "type": "str"},
        "drain_timeout_in_minutes": {"key": "drainTimeoutInMinutes", "type": "int"},
        "node_soak_duration_in_minutes": {"key": "nodeSoakDurationInMinutes", "type": "int"},
        "undrainable_node_behavior": {"key": "undrainableNodeBehavior", "type": "str"},
    }

    def __init__(
        self,
        *,
        max_surge: Optional[str] = None,
        max_unavailable: Optional[str] = None,
        drain_timeout_in_minutes: Optional[int] = None,
        node_soak_duration_in_minutes: Optional[int] = None,
        undrainable_node_behavior: Optional[Union[str, "_models.UndrainableNodeBehavior"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword max_surge: The maximum number or percentage of nodes that are surged during
         upgrade. Either an integer (e.g. '5') or a percentage (e.g. '50%') of the total agent pool
         size at the time of the upgrade; fractional nodes are rounded up. If not specified, the
         default is 10%. For more information, including best practices, see:
         https://learn.microsoft.com/en-us/azure/aks/upgrade-cluster.
        :paramtype max_surge: str
        :keyword max_unavailable: The maximum number or percentage of nodes that can be
         simultaneously unavailable during upgrade. Either an integer (e.g. '1') or a percentage
         (e.g. '5%') of the total agent pool size at the time of the upgrade; fractional nodes are
         rounded up. If not specified, the default is 0. For more information, including best
         practices, see: https://learn.microsoft.com/en-us/azure/aks/upgrade-cluster.
        :paramtype max_unavailable: str
        :keyword drain_timeout_in_minutes: The drain timeout for a node. The amount of time (in
         minutes) to wait on eviction of pods and graceful termination per node. This eviction wait
         time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails.
         If not specified, the default is 30 minutes.
        :paramtype drain_timeout_in_minutes: int
        :keyword node_soak_duration_in_minutes: The soak duration for a node. The amount of time
         (in minutes) to wait after draining a node and before reimaging it and moving on to next
         node. If not specified, the default is 0 minutes.
        :paramtype node_soak_duration_in_minutes: int
        :keyword undrainable_node_behavior: Defines the behavior for undrainable nodes during
         upgrade. The most common cause of undrainable nodes is Pod Disruption Budgets (PDBs), but
         other issues, such as pod termination grace period exceeding the remaining per-node drain
         timeout or a pod still being in a running state, can also cause undrainable nodes. Known
         values are: "Cordon" and "Schedule".
        :paramtype undrainable_node_behavior: str or
         ~azure.mgmt.containerservice.models.UndrainableNodeBehavior
        """
        super().__init__(**kwargs)
        # All fields are optional; None leaves the service default in effect.
        self.undrainable_node_behavior = undrainable_node_behavior
        self.node_soak_duration_in_minutes = node_soak_duration_in_minutes
        self.drain_timeout_in_minutes = drain_timeout_in_minutes
        self.max_unavailable = max_unavailable
        self.max_surge = max_surge
+
+
class AgentPoolWindowsProfile(_serialization.Model):
    """Windows-specific profile of an agent pool.

    :ivar disable_outbound_nat: Whether OutboundNAT is disabled in Windows nodes. Defaults to
     false. Outbound NAT can only be disabled when the cluster outboundType is NAT Gateway and the
     Windows agent pool does not have node public IP enabled.
    :vartype disable_outbound_nat: bool
    """

    # Python attribute name -> REST API (camelCase) wire key and serialized type.
    _attribute_map = {
        "disable_outbound_nat": {"key": "disableOutboundNat", "type": "bool"},
    }

    def __init__(
        self,
        *,
        disable_outbound_nat: Optional[bool] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword disable_outbound_nat: Whether OutboundNAT is disabled in Windows nodes. Defaults
         to false. Outbound NAT can only be disabled when the cluster outboundType is NAT Gateway
         and the Windows agent pool does not have node public IP enabled.
        :paramtype disable_outbound_nat: bool
        """
        super().__init__(**kwargs)
        self.disable_outbound_nat = disable_outbound_nat
+
+
class AzureKeyVaultKms(_serialization.Model):
    """Azure Key Vault key management service settings for the security profile.

    :ivar enabled: Whether to enable Azure Key Vault key management service. The default is false.
    :vartype enabled: bool
    :ivar key_id: Identifier of Azure Key Vault key. See `key identifier format
     <https://docs.microsoft.com/en-us/azure/key-vault/general/about-keys-secrets-certificates#vault-name-and-object-name>`_
     for more details. When Azure Key Vault key management service is enabled, this field is
     required and must be a valid key identifier. When Azure Key Vault key management service is
     disabled, leave the field empty.
    :vartype key_id: str
    :ivar key_vault_network_access: Network access of the key vault. Network access of key vault.
     The possible values are ``Public`` and ``Private``. ``Public`` means the key vault allows
     public access from all networks. ``Private`` means the key vault disables public access and
     enables private link. The default value is ``Public``. Known values are: "Public" and
     "Private".
    :vartype key_vault_network_access: str or
     ~azure.mgmt.containerservice.models.KeyVaultNetworkAccessTypes
    :ivar key_vault_resource_id: Resource ID of key vault. When keyVaultNetworkAccess is
     ``Private``\\ , this field is required and must be a valid resource ID. When
     keyVaultNetworkAccess is ``Public``\\ , leave the field empty.
    :vartype key_vault_resource_id: str
    """

    # Python attribute name -> REST API (camelCase) wire key and serialized type.
    _attribute_map = {
        "enabled": {"key": "enabled", "type": "bool"},
        "key_id": {"key": "keyId", "type": "str"},
        "key_vault_network_access": {"key": "keyVaultNetworkAccess", "type": "str"},
        "key_vault_resource_id": {"key": "keyVaultResourceId", "type": "str"},
    }

    def __init__(
        self,
        *,
        enabled: Optional[bool] = None,
        key_id: Optional[str] = None,
        key_vault_network_access: Union[str, "_models.KeyVaultNetworkAccessTypes"] = "Public",
        key_vault_resource_id: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword enabled: Whether to enable Azure Key Vault key management service. The default is
         false.
        :paramtype enabled: bool
        :keyword key_id: Identifier of Azure Key Vault key. See `key identifier format
         <https://docs.microsoft.com/en-us/azure/key-vault/general/about-keys-secrets-certificates#vault-name-and-object-name>`_
         for more details. When Azure Key Vault key management service is enabled, this field is
         required and must be a valid key identifier. When Azure Key Vault key management service is
         disabled, leave the field empty.
        :paramtype key_id: str
        :keyword key_vault_network_access: Network access of the key vault. Network access of key
         vault. The possible values are ``Public`` and ``Private``. ``Public`` means the key vault
         allows public access from all networks. ``Private`` means the key vault disables public access
         and enables private link. The default value is ``Public``. Known values are: "Public" and
         "Private".
        :paramtype key_vault_network_access: str or
         ~azure.mgmt.containerservice.models.KeyVaultNetworkAccessTypes
        :keyword key_vault_resource_id: Resource ID of key vault. When keyVaultNetworkAccess is
         ``Private``\\ , this field is required and must be a valid resource ID. When
         keyVaultNetworkAccess is ``Public``\\ , leave the field empty.
        :paramtype key_vault_resource_id: str
        """
        super().__init__(**kwargs)
        self.enabled = enabled
        self.key_id = key_id
        self.key_vault_network_access = key_vault_network_access
        self.key_vault_resource_id = key_vault_resource_id
+
+
class ClusterUpgradeSettings(_serialization.Model):
    """Settings applied when upgrading a cluster.

    :ivar override_settings: Settings for overrides.
    :vartype override_settings: ~azure.mgmt.containerservice.models.UpgradeOverrideSettings
    """

    # Python attribute name -> REST API wire key and serialized type.
    _attribute_map = {
        "override_settings": {"key": "overrideSettings", "type": "UpgradeOverrideSettings"},
    }

    def __init__(
        self,
        *,
        override_settings: Optional["_models.UpgradeOverrideSettings"] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword override_settings: Settings for overrides.
        :paramtype override_settings: ~azure.mgmt.containerservice.models.UpgradeOverrideSettings
        """
        super().__init__(**kwargs)
        self.override_settings = override_settings
+
+
class CompatibleVersions(_serialization.Model):
    """Version information for a product/service compatible with a service mesh revision.

    :ivar name: The product/service name.
    :vartype name: str
    :ivar versions: Product/service versions compatible with a service mesh add-on revision.
    :vartype versions: list[str]
    """

    # Python attribute name -> REST API wire key and serialized type.
    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "versions": {"key": "versions", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        versions: Optional[list[str]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword name: The product/service name.
        :paramtype name: str
        :keyword versions: Product/service versions compatible with a service mesh add-on revision.
        :paramtype versions: list[str]
        """
        super().__init__(**kwargs)
        self.versions = versions
        self.name = name
+
+
class ContainerServiceLinuxProfile(_serialization.Model):
    """Profile for Linux VMs in the container service cluster.

    All required parameters must be populated in order to send to server.

    :ivar admin_username: The administrator username to use for Linux VMs. Required.
    :vartype admin_username: str
    :ivar ssh: The SSH configuration for Linux-based VMs running on Azure. Required.
    :vartype ssh: ~azure.mgmt.containerservice.models.ContainerServiceSshConfiguration
    """

    # Server-side constraints enforced client-side before the request is sent.
    _validation = {
        "admin_username": {"required": True, "pattern": r"^[A-Za-z][-A-Za-z0-9_]*$"},
        "ssh": {"required": True},
    }

    # Python attribute name -> REST API wire key and serialized type.
    _attribute_map = {
        "admin_username": {"key": "adminUsername", "type": "str"},
        "ssh": {"key": "ssh", "type": "ContainerServiceSshConfiguration"},
    }

    def __init__(
        self,
        *,
        admin_username: str,
        ssh: "_models.ContainerServiceSshConfiguration",
        **kwargs: Any
    ) -> None:
        """
        :keyword admin_username: The administrator username to use for Linux VMs. Required.
        :paramtype admin_username: str
        :keyword ssh: The SSH configuration for Linux-based VMs running on Azure. Required.
        :paramtype ssh: ~azure.mgmt.containerservice.models.ContainerServiceSshConfiguration
        """
        super().__init__(**kwargs)
        self.ssh = ssh
        self.admin_username = admin_username
+
+
class ContainerServiceNetworkProfile(_serialization.Model):
    """Profile of network configuration.

    :ivar network_plugin: Network plugin used for building the Kubernetes network. Known values
     are: "azure", "kubenet", and "none".
    :vartype network_plugin: str or ~azure.mgmt.containerservice.models.NetworkPlugin
    :ivar network_plugin_mode: The mode the network plugin should use. "overlay"
    :vartype network_plugin_mode: str or ~azure.mgmt.containerservice.models.NetworkPluginMode
    :ivar network_policy: Network policy used for building the Kubernetes network. Known values
     are: "none", "calico", "azure", and "cilium".
    :vartype network_policy: str or ~azure.mgmt.containerservice.models.NetworkPolicy
    :ivar network_mode: The network mode Azure CNI is configured with. This cannot be specified if
     networkPlugin is anything other than 'azure'. Known values are: "transparent" and "bridge".
    :vartype network_mode: str or ~azure.mgmt.containerservice.models.NetworkMode
    :ivar network_dataplane: Network dataplane used in the Kubernetes cluster. Known values are:
     "azure" and "cilium".
    :vartype network_dataplane: str or ~azure.mgmt.containerservice.models.NetworkDataplane
    :ivar advanced_networking: Advanced Networking profile for enabling observability and security
     feature suite on a cluster. For more information see aka.ms/aksadvancednetworking.
    :vartype advanced_networking: ~azure.mgmt.containerservice.models.AdvancedNetworking
    :ivar pod_cidr: A CIDR notation IP range from which to assign pod IPs when kubenet is used.
    :vartype pod_cidr: str
    :ivar service_cidr: A CIDR notation IP range from which to assign service cluster IPs. It must
     not overlap with any Subnet IP ranges.
    :vartype service_cidr: str
    :ivar dns_service_ip: An IP address assigned to the Kubernetes DNS service. It must be within
     the Kubernetes service address range specified in serviceCidr.
    :vartype dns_service_ip: str
    :ivar outbound_type: The outbound (egress) routing method. This can only be set at cluster
     creation time and cannot be changed later. For more information see `egress outbound type
     <https://docs.microsoft.com/azure/aks/egress-outboundtype>`_. Known values are: "loadBalancer",
     "userDefinedRouting", "managedNATGateway", "userAssignedNATGateway", and "none".
    :vartype outbound_type: str or ~azure.mgmt.containerservice.models.OutboundType
    :ivar load_balancer_sku: The load balancer sku for the managed cluster. The default is
     'standard'. See `Azure Load Balancer SKUs
     <https://docs.microsoft.com/azure/load-balancer/skus>`_ for more information about the
     differences between load balancer SKUs. Known values are: "standard" and "basic".
    :vartype load_balancer_sku: str or ~azure.mgmt.containerservice.models.LoadBalancerSku
    :ivar load_balancer_profile: Profile of the cluster load balancer.
    :vartype load_balancer_profile:
     ~azure.mgmt.containerservice.models.ManagedClusterLoadBalancerProfile
    :ivar nat_gateway_profile: Profile of the cluster NAT gateway.
    :vartype nat_gateway_profile:
     ~azure.mgmt.containerservice.models.ManagedClusterNATGatewayProfile
    :ivar static_egress_gateway_profile: The profile for Static Egress Gateway addon. For more
     details about Static Egress Gateway, see https://aka.ms/aks/static-egress-gateway.
    :vartype static_egress_gateway_profile:
     ~azure.mgmt.containerservice.models.ManagedClusterStaticEgressGatewayProfile
    :ivar pod_cidrs: The CIDR notation IP ranges from which to assign pod IPs. One IPv4 CIDR is
     expected for single-stack networking. Two CIDRs, one for each IP family (IPv4/IPv6), is
     expected for dual-stack networking.
    :vartype pod_cidrs: list[str]
    :ivar service_cidrs: The CIDR notation IP ranges from which to assign service cluster IPs. One
     IPv4 CIDR is expected for single-stack networking. Two CIDRs, one for each IP family
     (IPv4/IPv6), is expected for dual-stack networking. They must not overlap with any Subnet IP
     ranges.
    :vartype service_cidrs: list[str]
    :ivar ip_families: The IP families used to specify IP versions available to the cluster. IP
     families are used to determine single-stack or dual-stack clusters. For single-stack, the
     expected value is IPv4. For dual-stack, the expected values are IPv4 and IPv6.
    :vartype ip_families: list[str or ~azure.mgmt.containerservice.models.IpFamily]
    """

    # Client-side constraints: CIDR fields accept IPv4 with an optional /0-/32 prefix;
    # dnsServiceIP must be a bare dotted-quad IPv4 address.
    _validation = {
        "pod_cidr": {"pattern": r"^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$"},
        "service_cidr": {"pattern": r"^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$"},
        "dns_service_ip": {
            "pattern": r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
        },
    }

    # Python attribute name -> REST API (camelCase) wire key and serialized type.
    _attribute_map = {
        "network_plugin": {"key": "networkPlugin", "type": "str"},
        "network_plugin_mode": {"key": "networkPluginMode", "type": "str"},
        "network_policy": {"key": "networkPolicy", "type": "str"},
        "network_mode": {"key": "networkMode", "type": "str"},
        "network_dataplane": {"key": "networkDataplane", "type": "str"},
        "advanced_networking": {"key": "advancedNetworking", "type": "AdvancedNetworking"},
        "pod_cidr": {"key": "podCidr", "type": "str"},
        "service_cidr": {"key": "serviceCidr", "type": "str"},
        "dns_service_ip": {"key": "dnsServiceIP", "type": "str"},
        "outbound_type": {"key": "outboundType", "type": "str"},
        "load_balancer_sku": {"key": "loadBalancerSku", "type": "str"},
        "load_balancer_profile": {"key": "loadBalancerProfile", "type": "ManagedClusterLoadBalancerProfile"},
        "nat_gateway_profile": {"key": "natGatewayProfile", "type": "ManagedClusterNATGatewayProfile"},
        "static_egress_gateway_profile": {
            "key": "staticEgressGatewayProfile",
            "type": "ManagedClusterStaticEgressGatewayProfile",
        },
        "pod_cidrs": {"key": "podCidrs", "type": "[str]"},
        "service_cidrs": {"key": "serviceCidrs", "type": "[str]"},
        "ip_families": {"key": "ipFamilies", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        network_plugin: Optional[Union[str, "_models.NetworkPlugin"]] = None,
        network_plugin_mode: Optional[Union[str, "_models.NetworkPluginMode"]] = None,
        network_policy: Optional[Union[str, "_models.NetworkPolicy"]] = None,
        network_mode: Optional[Union[str, "_models.NetworkMode"]] = None,
        network_dataplane: Optional[Union[str, "_models.NetworkDataplane"]] = None,
        advanced_networking: Optional["_models.AdvancedNetworking"] = None,
        pod_cidr: str = "10.244.0.0/16",
        service_cidr: str = "10.0.0.0/16",
        dns_service_ip: str = "10.0.0.10",
        outbound_type: Union[str, "_models.OutboundType"] = "loadBalancer",
        load_balancer_sku: Optional[Union[str, "_models.LoadBalancerSku"]] = None,
        load_balancer_profile: Optional["_models.ManagedClusterLoadBalancerProfile"] = None,
        nat_gateway_profile: Optional["_models.ManagedClusterNATGatewayProfile"] = None,
        static_egress_gateway_profile: Optional["_models.ManagedClusterStaticEgressGatewayProfile"] = None,
        pod_cidrs: Optional[list[str]] = None,
        service_cidrs: Optional[list[str]] = None,
        ip_families: Optional[list[Union[str, "_models.IpFamily"]]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword network_plugin: Network plugin used for building the Kubernetes network. Known values
         are: "azure", "kubenet", and "none".
        :paramtype network_plugin: str or ~azure.mgmt.containerservice.models.NetworkPlugin
        :keyword network_plugin_mode: The mode the network plugin should use. "overlay"
        :paramtype network_plugin_mode: str or ~azure.mgmt.containerservice.models.NetworkPluginMode
        :keyword network_policy: Network policy used for building the Kubernetes network. Known values
         are: "none", "calico", "azure", and "cilium".
        :paramtype network_policy: str or ~azure.mgmt.containerservice.models.NetworkPolicy
        :keyword network_mode: The network mode Azure CNI is configured with. This cannot be specified
         if networkPlugin is anything other than 'azure'. Known values are: "transparent" and "bridge".
        :paramtype network_mode: str or ~azure.mgmt.containerservice.models.NetworkMode
        :keyword network_dataplane: Network dataplane used in the Kubernetes cluster. Known values are:
         "azure" and "cilium".
        :paramtype network_dataplane: str or ~azure.mgmt.containerservice.models.NetworkDataplane
        :keyword advanced_networking: Advanced Networking profile for enabling observability and
         security feature suite on a cluster. For more information see aka.ms/aksadvancednetworking.
        :paramtype advanced_networking: ~azure.mgmt.containerservice.models.AdvancedNetworking
        :keyword pod_cidr: A CIDR notation IP range from which to assign pod IPs when kubenet is used.
        :paramtype pod_cidr: str
        :keyword service_cidr: A CIDR notation IP range from which to assign service cluster IPs. It
         must not overlap with any Subnet IP ranges.
        :paramtype service_cidr: str
        :keyword dns_service_ip: An IP address assigned to the Kubernetes DNS service. It must be
         within the Kubernetes service address range specified in serviceCidr.
        :paramtype dns_service_ip: str
        :keyword outbound_type: The outbound (egress) routing method. This can only be set at cluster
         creation time and cannot be changed later. For more information see `egress outbound type
         <https://docs.microsoft.com/azure/aks/egress-outboundtype>`_. Known values are: "loadBalancer",
         "userDefinedRouting", "managedNATGateway", "userAssignedNATGateway", and "none".
        :paramtype outbound_type: str or ~azure.mgmt.containerservice.models.OutboundType
        :keyword load_balancer_sku: The load balancer sku for the managed cluster. The default is
         'standard'. See `Azure Load Balancer SKUs
         <https://docs.microsoft.com/azure/load-balancer/skus>`_ for more information about the
         differences between load balancer SKUs. Known values are: "standard" and "basic".
        :paramtype load_balancer_sku: str or ~azure.mgmt.containerservice.models.LoadBalancerSku
        :keyword load_balancer_profile: Profile of the cluster load balancer.
        :paramtype load_balancer_profile:
         ~azure.mgmt.containerservice.models.ManagedClusterLoadBalancerProfile
        :keyword nat_gateway_profile: Profile of the cluster NAT gateway.
        :paramtype nat_gateway_profile:
         ~azure.mgmt.containerservice.models.ManagedClusterNATGatewayProfile
        :keyword static_egress_gateway_profile: The profile for Static Egress Gateway addon. For more
         details about Static Egress Gateway, see https://aka.ms/aks/static-egress-gateway.
        :paramtype static_egress_gateway_profile:
         ~azure.mgmt.containerservice.models.ManagedClusterStaticEgressGatewayProfile
        :keyword pod_cidrs: The CIDR notation IP ranges from which to assign pod IPs. One IPv4 CIDR is
         expected for single-stack networking. Two CIDRs, one for each IP family (IPv4/IPv6), is
         expected for dual-stack networking.
        :paramtype pod_cidrs: list[str]
        :keyword service_cidrs: The CIDR notation IP ranges from which to assign service cluster IPs.
         One IPv4 CIDR is expected for single-stack networking. Two CIDRs, one for each IP family
         (IPv4/IPv6), is expected for dual-stack networking. They must not overlap with any Subnet IP
         ranges.
        :paramtype service_cidrs: list[str]
        :keyword ip_families: The IP families used to specify IP versions available to the cluster. IP
         families are used to determine single-stack or dual-stack clusters. For single-stack, the
         expected value is IPv4. For dual-stack, the expected values are IPv4 and IPv6.
        :paramtype ip_families: list[str or ~azure.mgmt.containerservice.models.IpFamily]
        """
        super().__init__(**kwargs)
        self.network_plugin = network_plugin
        self.network_plugin_mode = network_plugin_mode
        self.network_policy = network_policy
        self.network_mode = network_mode
        self.network_dataplane = network_dataplane
        self.advanced_networking = advanced_networking
        self.pod_cidr = pod_cidr
        self.service_cidr = service_cidr
        self.dns_service_ip = dns_service_ip
        self.outbound_type = outbound_type
        self.load_balancer_sku = load_balancer_sku
        self.load_balancer_profile = load_balancer_profile
        self.nat_gateway_profile = nat_gateway_profile
        self.static_egress_gateway_profile = static_egress_gateway_profile
        self.pod_cidrs = pod_cidrs
        self.service_cidrs = service_cidrs
        self.ip_families = ip_families
+
+
class ContainerServiceSshConfiguration(_serialization.Model):
    """SSH configuration for Linux-based VMs running on Azure.

    All required parameters must be populated in order to send to server.

    :ivar public_keys: The list of SSH public keys used to authenticate with Linux-based VMs. At
     most 1 key may be specified. Required.
    :vartype public_keys: list[~azure.mgmt.containerservice.models.ContainerServiceSshPublicKey]
    """

    # Server-side constraints enforced client-side before the request is sent.
    _validation = {
        "public_keys": {"required": True},
    }

    # Python attribute name -> REST API wire key and serialized type.
    _attribute_map = {
        "public_keys": {"key": "publicKeys", "type": "[ContainerServiceSshPublicKey]"},
    }

    def __init__(
        self,
        *,
        public_keys: list["_models.ContainerServiceSshPublicKey"],
        **kwargs: Any
    ) -> None:
        """
        :keyword public_keys: The list of SSH public keys used to authenticate with Linux-based
         VMs. At most 1 key may be specified. Required.
        :paramtype public_keys:
         list[~azure.mgmt.containerservice.models.ContainerServiceSshPublicKey]
        """
        super().__init__(**kwargs)
        self.public_keys = public_keys
+
+
class ContainerServiceSshPublicKey(_serialization.Model):
    """Contains information about SSH certificate public key data.

    All required parameters must be populated in order to send to server.

    :ivar key_data: Certificate public key used to authenticate with VMs through SSH. The
     certificate must be in PEM format, with or without headers. Required.
    :vartype key_data: str
    """

    # Server-side constraints enforced client-side before the request is sent.
    _validation = {
        "key_data": {"required": True},
    }

    # Python attribute name -> REST API wire key and serialized type.
    _attribute_map = {
        "key_data": {"key": "keyData", "type": "str"},
    }

    def __init__(
        self,
        *,
        key_data: str,
        **kwargs: Any
    ) -> None:
        """
        :keyword key_data: Certificate public key used to authenticate with VMs through SSH. The
         certificate must be in PEM format, with or without headers. Required.
        :paramtype key_data: str
        """
        super().__init__(**kwargs)
        self.key_data = key_data
+
+
class CreationData(_serialization.Model):
    """Data used when creating a target resource from a source resource.

    :ivar source_resource_id: The ARM ID of the source object used to create the target object.
    :vartype source_resource_id: str
    """

    # Python attribute name -> REST API wire key and serialized type.
    _attribute_map = {
        "source_resource_id": {"key": "sourceResourceId", "type": "str"},
    }

    def __init__(
        self,
        *,
        source_resource_id: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword source_resource_id: The ARM ID of the source object used to create the target
         object.
        :paramtype source_resource_id: str
        """
        super().__init__(**kwargs)
        self.source_resource_id = source_resource_id
+
+
class CredentialResult(_serialization.Model):
    """The credential result response.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name: The name of the credential.
    :vartype name: str
    :ivar value: Base64-encoded Kubernetes configuration file.
    :vartype value: bytes
    """

    # Both fields are read-only: the server populates them; clients never send them.
    _validation = {
        "name": {"readonly": True},
        "value": {"readonly": True},
    }

    # Python attribute name -> REST API wire key and serialized type.
    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "value": {"key": "value", "type": "bytearray"},
    }

    def __init__(self, **kwargs: Any) -> None:
        """No client-settable properties; all fields are populated by the server."""
        super().__init__(**kwargs)
        # Initialized to None; filled in during deserialization of a server response.
        self.value: Optional[bytes] = None
        self.name: Optional[str] = None
+
+
class CredentialResults(_serialization.Model):
    """The list credential result response.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar kubeconfigs: Base64-encoded Kubernetes configuration file.
    :vartype kubeconfigs: list[~azure.mgmt.containerservice.models.CredentialResult]
    """

    # Read-only: the server populates this; clients never send it.
    _validation = {
        "kubeconfigs": {"readonly": True},
    }

    # Python attribute name -> REST API wire key and serialized type.
    _attribute_map = {
        "kubeconfigs": {"key": "kubeconfigs", "type": "[CredentialResult]"},
    }

    def __init__(self, **kwargs: Any) -> None:
        """No client-settable properties; all fields are populated by the server."""
        super().__init__(**kwargs)
        # Initialized to None; filled in during deserialization of a server response.
        self.kubeconfigs: Optional[list["_models.CredentialResult"]] = None
+
+
class DailySchedule(_serialization.Model):
    """For schedules like: 'recur every day' or 'recur every 3 days'.

    All required parameters must be populated in order to send to server.

    :ivar interval_days: Specifies the number of days between each set of occurrences. Required.
    :vartype interval_days: int
    """

    # Required, and constrained to the 1..7 range by the service contract.
    _validation = {
        "interval_days": {"required": True, "maximum": 7, "minimum": 1},
    }

    # Python attribute name -> REST API wire key and serialized type.
    _attribute_map = {
        "interval_days": {"key": "intervalDays", "type": "int"},
    }

    def __init__(
        self,
        *,
        interval_days: int,
        **kwargs: Any
    ) -> None:
        """
        :keyword interval_days: Specifies the number of days between each set of occurrences.
         Required.
        :paramtype interval_days: int
        """
        super().__init__(**kwargs)
        self.interval_days = interval_days
+
+
class DateSpan(_serialization.Model):
    """A date range. For example, between '2022-12-23' and '2023-01-05'.

    All required parameters must be populated in order to send to server.

    :ivar start: The start date of the date span. Required.
    :vartype start: ~datetime.date
    :ivar end: The end date of the date span. Required.
    :vartype end: ~datetime.date
    """

    # Both boundaries must be supplied by the caller.
    _validation = {
        "start": {"required": True},
        "end": {"required": True},
    }

    # Python attribute name -> REST API wire key and serialized type.
    _attribute_map = {
        "start": {"key": "start", "type": "date"},
        "end": {"key": "end", "type": "date"},
    }

    def __init__(
        self,
        *,
        start: datetime.date,
        end: datetime.date,
        **kwargs: Any
    ) -> None:
        """
        :keyword start: The start date of the date span. Required.
        :paramtype start: ~datetime.date
        :keyword end: The end date of the date span. Required.
        :paramtype end: ~datetime.date
        """
        super().__init__(**kwargs)
        self.end = end
        self.start = start
+
+
class DelegatedResource(_serialization.Model):
    """Delegated resource properties - internal use only.

    :ivar resource_id: The ARM resource id of the delegated resource - internal use only.
    :vartype resource_id: str
    :ivar tenant_id: The tenant id of the delegated resource - internal use only.
    :vartype tenant_id: str
    :ivar referral_resource: The delegation id of the referral delegation (optional) - internal
     use only.
    :vartype referral_resource: str
    :ivar location: The source resource location - internal use only.
    :vartype location: str
    """

    # Python attribute name -> REST API (camelCase) wire key and serialized type.
    _attribute_map = {
        "resource_id": {"key": "resourceId", "type": "str"},
        "tenant_id": {"key": "tenantId", "type": "str"},
        "referral_resource": {"key": "referralResource", "type": "str"},
        "location": {"key": "location", "type": "str"},
    }

    def __init__(
        self,
        *,
        resource_id: Optional[str] = None,
        tenant_id: Optional[str] = None,
        referral_resource: Optional[str] = None,
        location: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword resource_id: The ARM resource id of the delegated resource - internal use only.
        :paramtype resource_id: str
        :keyword tenant_id: The tenant id of the delegated resource - internal use only.
        :paramtype tenant_id: str
        :keyword referral_resource: The delegation id of the referral delegation (optional) -
         internal use only.
        :paramtype referral_resource: str
        :keyword location: The source resource location - internal use only.
        :paramtype location: str
        """
        super().__init__(**kwargs)
        self.location = location
        self.referral_resource = referral_resource
        self.tenant_id = tenant_id
        self.resource_id = resource_id
+
+
class EndpointDependency(_serialization.Model):
    """A domain name that AKS agent nodes are reaching at.

    :ivar domain_name: The domain name of the dependency.
    :vartype domain_name: str
    :ivar endpoint_details: The Ports and Protocols used when connecting to domainName.
    :vartype endpoint_details: list[~azure.mgmt.containerservice.models.EndpointDetail]
    """

    # Python attribute name -> REST API (camelCase) wire key and serialized type.
    _attribute_map = {
        "domain_name": {"key": "domainName", "type": "str"},
        "endpoint_details": {"key": "endpointDetails", "type": "[EndpointDetail]"},
    }

    def __init__(
        self,
        *,
        domain_name: Optional[str] = None,
        endpoint_details: Optional[list["_models.EndpointDetail"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword domain_name: The domain name of the dependency.
        :paramtype domain_name: str
        :keyword endpoint_details: The Ports and Protocols used when connecting to domainName.
        :paramtype endpoint_details: list[~azure.mgmt.containerservice.models.EndpointDetail]
        """
        super().__init__(**kwargs)
        self.endpoint_details = endpoint_details
        self.domain_name = domain_name
+
+
class EndpointDetail(_serialization.Model):
    """Connect information from the AKS agent nodes to a single endpoint.

    :ivar ip_address: An IP Address that Domain Name currently resolves to.
    :vartype ip_address: str
    :ivar port: The port an endpoint is connected to.
    :vartype port: int
    :ivar protocol: The protocol used for connection.
    :vartype protocol: str
    :ivar description: Description of the detail.
    :vartype description: str
    """

    # Python attribute name -> REST API (camelCase) wire key and serialized type.
    _attribute_map = {
        "ip_address": {"key": "ipAddress", "type": "str"},
        "port": {"key": "port", "type": "int"},
        "protocol": {"key": "protocol", "type": "str"},
        "description": {"key": "description", "type": "str"},
    }

    def __init__(
        self,
        *,
        ip_address: Optional[str] = None,
        port: Optional[int] = None,
        protocol: Optional[str] = None,
        description: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword ip_address: An IP Address that Domain Name currently resolves to.
        :paramtype ip_address: str
        :keyword port: The port an endpoint is connected to.
        :paramtype port: int
        :keyword protocol: The protocol used for connection.
        :paramtype protocol: str
        :keyword description: Description of the detail.
        :paramtype description: str
        """
        super().__init__(**kwargs)
        self.description = description
        self.protocol = protocol
        self.port = port
        self.ip_address = ip_address
+
+
class ErrorAdditionalInfo(_serialization.Model):
    """The resource management error additional info.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar type: The additional info type.
    :vartype type: str
    :ivar info: The additional info.
    :vartype info: JSON
    """

    # Both fields are read-only: populated by the server, never sent by the client.
    _validation = {
        "type": {"readonly": True},
        "info": {"readonly": True},
    }

    # Python attribute name -> REST API wire key and serialized type.
    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "info": {"key": "info", "type": "object"},
    }

    def __init__(self, **kwargs: Any) -> None:
        """No client-settable properties; all fields are populated by the server."""
        super().__init__(**kwargs)
        # Initialized to None; filled in during deserialization of a server response.
        self.info: Optional[JSON] = None
        self.type: Optional[str] = None
+
+
class ErrorDetail(_serialization.Model):
    """The error detail.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: The error code.
    :vartype code: str
    :ivar message: The error message.
    :vartype message: str
    :ivar target: The error target.
    :vartype target: str
    :ivar details: The error details.
    :vartype details: list[~azure.mgmt.containerservice.models.ErrorDetail]
    :ivar additional_info: The error additional info.
    :vartype additional_info: list[~azure.mgmt.containerservice.models.ErrorAdditionalInfo]
    """

    # Every field is read-only: populated by the server, never sent by the client.
    _validation = {
        "code": {"readonly": True},
        "message": {"readonly": True},
        "target": {"readonly": True},
        "details": {"readonly": True},
        "additional_info": {"readonly": True},
    }

    # Python attribute name -> REST API (camelCase) wire key and serialized type.
    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "target": {"key": "target", "type": "str"},
        "details": {"key": "details", "type": "[ErrorDetail]"},
        "additional_info": {"key": "additionalInfo", "type": "[ErrorAdditionalInfo]"},
    }

    def __init__(self, **kwargs: Any) -> None:
        """No client-settable properties; all fields are populated by the server."""
        super().__init__(**kwargs)
        # Initialized to None; filled in during deserialization of a server response.
        self.additional_info: Optional[list["_models.ErrorAdditionalInfo"]] = None
        self.details: Optional[list["_models.ErrorDetail"]] = None
        self.target: Optional[str] = None
        self.message: Optional[str] = None
        self.code: Optional[str] = None
+
+
class ErrorResponse(_serialization.Model):
    """Standard Azure Resource Manager error envelope for failed operations (follows the
    OData error response format).

    :ivar error: The error object.
    :vartype error: ~azure.mgmt.containerservice.models.ErrorDetail
    """

    _attribute_map = {"error": {"key": "error", "type": "ErrorDetail"}}

    def __init__(self, *, error: Optional["_models.ErrorDetail"] = None, **kwargs: Any) -> None:
        """
        :keyword error: The error object.
        :paramtype error: ~azure.mgmt.containerservice.models.ErrorDetail
        """
        super().__init__(**kwargs)
        self.error = error
+
+
class ExtendedLocation(_serialization.Model):
    """An Azure extended location; currently only the "EdgeZone" type is defined.

    :ivar name: The name of the extended location.
    :vartype name: str
    :ivar type: The type of the extended location. "EdgeZone"
    :vartype type: str or ~azure.mgmt.containerservice.models.ExtendedLocationTypes
    """

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        type: Optional[Union[str, "_models.ExtendedLocationTypes"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword name: The name of the extended location.
        :paramtype name: str
        :keyword type: The type of the extended location. "EdgeZone"
        :paramtype type: str or ~azure.mgmt.containerservice.models.ExtendedLocationTypes
        """
        super().__init__(**kwargs)
        # 'type' intentionally shadows the builtin to match the wire-format field name.
        self.name = name
        self.type = type
+
+
class GPUProfile(_serialization.Model):
    """Per-agent-pool GPU configuration.

    :ivar driver: Whether to install GPU drivers. When it's not specified, default is Install.
     Known values are: "Install" and "None".
    :vartype driver: str or ~azure.mgmt.containerservice.models.GPUDriver
    """

    _attribute_map = {"driver": {"key": "driver", "type": "str"}}

    def __init__(self, *, driver: Optional[Union[str, "_models.GPUDriver"]] = None, **kwargs: Any) -> None:
        """
        :keyword driver: Whether to install GPU drivers. When it's not specified, default is
         Install. Known values are: "Install" and "None".
        :paramtype driver: str or ~azure.mgmt.containerservice.models.GPUDriver
        """
        super().__init__(**kwargs)
        # None here means "unspecified" — the service applies its own default (Install).
        self.driver = driver
+
+
class IPTag(_serialization.Model):
    """An IP tag (type/value pair) associated with a public IP.

    :ivar ip_tag_type: The IP tag type. Example: RoutingPreference.
    :vartype ip_tag_type: str
    :ivar tag: The value of the IP tag associated with the public IP. Example: Internet.
    :vartype tag: str
    """

    _attribute_map = {
        "ip_tag_type": {"key": "ipTagType", "type": "str"},
        "tag": {"key": "tag", "type": "str"},
    }

    def __init__(self, *, ip_tag_type: Optional[str] = None, tag: Optional[str] = None, **kwargs: Any) -> None:
        """
        :keyword ip_tag_type: The IP tag type. Example: RoutingPreference.
        :paramtype ip_tag_type: str
        :keyword tag: The value of the IP tag associated with the public IP. Example: Internet.
        :paramtype tag: str
        """
        super().__init__(**kwargs)
        self.ip_tag_type = ip_tag_type
        self.tag = tag
+
+
class IstioCertificateAuthority(_serialization.Model):
    """Certificate Authority (CA) configuration for the Istio service mesh. For now, we only
    support plugin certificates as described here https://aka.ms/asm-plugin-ca.

    :ivar plugin: Plugin certificates information for Service Mesh.
    :vartype plugin: ~azure.mgmt.containerservice.models.IstioPluginCertificateAuthority
    """

    _attribute_map = {"plugin": {"key": "plugin", "type": "IstioPluginCertificateAuthority"}}

    def __init__(self, *, plugin: Optional["_models.IstioPluginCertificateAuthority"] = None, **kwargs: Any) -> None:
        """
        :keyword plugin: Plugin certificates information for Service Mesh.
        :paramtype plugin: ~azure.mgmt.containerservice.models.IstioPluginCertificateAuthority
        """
        super().__init__(**kwargs)
        self.plugin = plugin
+
+
class IstioComponents(_serialization.Model):
    """Configuration of the Istio add-on components (ingress and egress gateways).

    :ivar ingress_gateways: Istio ingress gateways.
    :vartype ingress_gateways: list[~azure.mgmt.containerservice.models.IstioIngressGateway]
    :ivar egress_gateways: Istio egress gateways.
    :vartype egress_gateways: list[~azure.mgmt.containerservice.models.IstioEgressGateway]
    """

    _attribute_map = {
        "ingress_gateways": {"key": "ingressGateways", "type": "[IstioIngressGateway]"},
        "egress_gateways": {"key": "egressGateways", "type": "[IstioEgressGateway]"},
    }

    def __init__(
        self,
        *,
        ingress_gateways: Optional[list["_models.IstioIngressGateway"]] = None,
        egress_gateways: Optional[list["_models.IstioEgressGateway"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword ingress_gateways: Istio ingress gateways.
        :paramtype ingress_gateways: list[~azure.mgmt.containerservice.models.IstioIngressGateway]
        :keyword egress_gateways: Istio egress gateways.
        :paramtype egress_gateways: list[~azure.mgmt.containerservice.models.IstioEgressGateway]
        """
        super().__init__(**kwargs)
        self.ingress_gateways = ingress_gateways
        self.egress_gateways = egress_gateways
+
+
class IstioEgressGateway(_serialization.Model):
    """Configuration of a single Istio add-on egress gateway.

    All required parameters must be populated in order to send to server.

    :ivar enabled: Whether to enable the egress gateway. Required.
    :vartype enabled: bool
    :ivar name: Name of the Istio add-on egress gateway. Required.
    :vartype name: str
    :ivar namespace: Namespace that the Istio add-on egress gateway should be deployed in. If
     unspecified, the default is aks-istio-egress.
    :vartype namespace: str
    :ivar gateway_configuration_name: Name of the gateway configuration custom resource for the
     Istio add-on egress gateway. Must be specified when enabling the Istio egress gateway. Must be
     deployed in the same namespace that the Istio egress gateway will be deployed in.
    :vartype gateway_configuration_name: str
    """

    # 'name' must be a valid DNS-1123 subdomain, hence the pattern below.
    _validation = {
        "enabled": {"required": True},
        "name": {"required": True, "pattern": r"[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*"},
    }

    _attribute_map = {
        "enabled": {"key": "enabled", "type": "bool"},
        "name": {"key": "name", "type": "str"},
        "namespace": {"key": "namespace", "type": "str"},
        "gateway_configuration_name": {"key": "gatewayConfigurationName", "type": "str"},
    }

    def __init__(
        self,
        *,
        enabled: bool,
        name: str,
        namespace: Optional[str] = None,
        gateway_configuration_name: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword enabled: Whether to enable the egress gateway. Required.
        :paramtype enabled: bool
        :keyword name: Name of the Istio add-on egress gateway. Required.
        :paramtype name: str
        :keyword namespace: Namespace that the Istio add-on egress gateway should be deployed in.
         If unspecified, the default is aks-istio-egress.
        :paramtype namespace: str
        :keyword gateway_configuration_name: Name of the gateway configuration custom resource for
         the Istio add-on egress gateway. Must be specified when enabling the Istio egress gateway.
         Must be deployed in the same namespace that the Istio egress gateway will be deployed in.
        :paramtype gateway_configuration_name: str
        """
        super().__init__(**kwargs)
        self.enabled = enabled
        self.name = name
        self.namespace = namespace
        self.gateway_configuration_name = gateway_configuration_name
+
+
class IstioIngressGateway(_serialization.Model):
    """Configuration of a single Istio add-on ingress gateway. For now, we support up to one
    external ingress gateway named ``aks-istio-ingressgateway-external`` and one internal ingress
    gateway named ``aks-istio-ingressgateway-internal``.

    All required parameters must be populated in order to send to server.

    :ivar mode: Mode of an ingress gateway. Required. Known values are: "External" and "Internal".
    :vartype mode: str or ~azure.mgmt.containerservice.models.IstioIngressGatewayMode
    :ivar enabled: Whether to enable the ingress gateway. Required.
    :vartype enabled: bool
    """

    _validation = {
        "mode": {"required": True},
        "enabled": {"required": True},
    }

    _attribute_map = {
        "mode": {"key": "mode", "type": "str"},
        "enabled": {"key": "enabled", "type": "bool"},
    }

    def __init__(self, *, mode: Union[str, "_models.IstioIngressGatewayMode"], enabled: bool, **kwargs: Any) -> None:
        """
        :keyword mode: Mode of an ingress gateway. Required. Known values are: "External" and
         "Internal".
        :paramtype mode: str or ~azure.mgmt.containerservice.models.IstioIngressGatewayMode
        :keyword enabled: Whether to enable the ingress gateway. Required.
        :paramtype enabled: bool
        """
        super().__init__(**kwargs)
        self.mode = mode
        self.enabled = enabled
+
+
class IstioPluginCertificateAuthority(_serialization.Model):
    """Plugin CA certificate material for the Istio service mesh, referenced by object name from
    an Azure Key Vault.

    :ivar key_vault_id: The resource ID of the Key Vault.
    :vartype key_vault_id: str
    :ivar cert_object_name: Intermediate certificate object name in Azure Key Vault.
    :vartype cert_object_name: str
    :ivar key_object_name: Intermediate certificate private key object name in Azure Key Vault.
    :vartype key_object_name: str
    :ivar root_cert_object_name: Root certificate object name in Azure Key Vault.
    :vartype root_cert_object_name: str
    :ivar cert_chain_object_name: Certificate chain object name in Azure Key Vault.
    :vartype cert_chain_object_name: str
    """

    _attribute_map = {
        "key_vault_id": {"key": "keyVaultId", "type": "str"},
        "cert_object_name": {"key": "certObjectName", "type": "str"},
        "key_object_name": {"key": "keyObjectName", "type": "str"},
        "root_cert_object_name": {"key": "rootCertObjectName", "type": "str"},
        "cert_chain_object_name": {"key": "certChainObjectName", "type": "str"},
    }

    def __init__(
        self,
        *,
        key_vault_id: Optional[str] = None,
        cert_object_name: Optional[str] = None,
        key_object_name: Optional[str] = None,
        root_cert_object_name: Optional[str] = None,
        cert_chain_object_name: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword key_vault_id: The resource ID of the Key Vault.
        :paramtype key_vault_id: str
        :keyword cert_object_name: Intermediate certificate object name in Azure Key Vault.
        :paramtype cert_object_name: str
        :keyword key_object_name: Intermediate certificate private key object name in Azure Key
         Vault.
        :paramtype key_object_name: str
        :keyword root_cert_object_name: Root certificate object name in Azure Key Vault.
        :paramtype root_cert_object_name: str
        :keyword cert_chain_object_name: Certificate chain object name in Azure Key Vault.
        :paramtype cert_chain_object_name: str
        """
        super().__init__(**kwargs)
        self.key_vault_id = key_vault_id
        self.cert_object_name = cert_object_name
        self.key_object_name = key_object_name
        self.root_cert_object_name = root_cert_object_name
        self.cert_chain_object_name = cert_chain_object_name
+
+
class IstioServiceMesh(_serialization.Model):
    """Top-level Istio service mesh configuration: components, certificate authority and control
    plane revisions.

    :ivar components: Istio components configuration.
    :vartype components: ~azure.mgmt.containerservice.models.IstioComponents
    :ivar certificate_authority: Istio Service Mesh Certificate Authority (CA) configuration. For
     now, we only support plugin certificates as described here https://aka.ms/asm-plugin-ca.
    :vartype certificate_authority: ~azure.mgmt.containerservice.models.IstioCertificateAuthority
    :ivar revisions: The list of revisions of the Istio control plane. When an upgrade is not in
     progress, this holds one value. When canary upgrade is in progress, this can only hold two
     consecutive values. For more information, see:
     https://learn.microsoft.com/en-us/azure/aks/istio-upgrade.
    :vartype revisions: list[str]
    """

    # At most two distinct revisions may coexist (steady state: one; canary upgrade: two).
    _validation = {
        "revisions": {"max_items": 2, "min_items": 0, "unique": True},
    }

    _attribute_map = {
        "components": {"key": "components", "type": "IstioComponents"},
        "certificate_authority": {"key": "certificateAuthority", "type": "IstioCertificateAuthority"},
        "revisions": {"key": "revisions", "type": "[str]"},
    }

    def __init__(
        self,
        *,
        components: Optional["_models.IstioComponents"] = None,
        certificate_authority: Optional["_models.IstioCertificateAuthority"] = None,
        revisions: Optional[list[str]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword components: Istio components configuration.
        :paramtype components: ~azure.mgmt.containerservice.models.IstioComponents
        :keyword certificate_authority: Istio Service Mesh Certificate Authority (CA)
         configuration. For now, we only support plugin certificates as described here
         https://aka.ms/asm-plugin-ca.
        :paramtype certificate_authority:
         ~azure.mgmt.containerservice.models.IstioCertificateAuthority
        :keyword revisions: The list of revisions of the Istio control plane. When an upgrade is
         not in progress, this holds one value. When canary upgrade is in progress, this can only
         hold two consecutive values. For more information, see:
         https://learn.microsoft.com/en-us/azure/aks/istio-upgrade.
        :paramtype revisions: list[str]
        """
        super().__init__(**kwargs)
        self.components = components
        self.certificate_authority = certificate_authority
        self.revisions = revisions
+
+
class KubeletConfig(_serialization.Model):
    """Kubelet configurations of agent nodes. See `AKS custom node configuration
    <https://learn.microsoft.com/azure/aks/custom-node-configuration>`_ for more details.

    :ivar cpu_manager_policy: The CPU Manager policy to use. The default is 'none'. See `Kubernetes
     CPU management policies
     <https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/>`_
     for more information. Allowed values are 'none' and 'static'.
    :vartype cpu_manager_policy: str
    :ivar cpu_cfs_quota: If CPU CFS quota enforcement is enabled for containers that specify CPU
     limits. The default is true.
    :vartype cpu_cfs_quota: bool
    :ivar cpu_cfs_quota_period: The CPU CFS quota period value. The default is '100ms.' Valid
     values are a sequence of decimal numbers with an optional fraction and a unit suffix. For
     example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.
    :vartype cpu_cfs_quota_period: str
    :ivar image_gc_high_threshold: The percent of disk usage after which image garbage collection
     is always run. To disable image garbage collection, set to 100. The default is 85%.
    :vartype image_gc_high_threshold: int
    :ivar image_gc_low_threshold: The percent of disk usage before which image garbage collection
     is never run. This cannot be set higher than imageGcHighThreshold. The default is 80%.
    :vartype image_gc_low_threshold: int
    :ivar topology_manager_policy: The Topology Manager policy to use. For more information see
     `Kubernetes Topology Manager
     <https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/>`_. The default is
     'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.
    :vartype topology_manager_policy: str
    :ivar allowed_unsafe_sysctls: Allowed list of unsafe sysctls or unsafe sysctl patterns (ending
     in ``*``\\ ).
    :vartype allowed_unsafe_sysctls: list[str]
    :ivar fail_swap_on: If set to true it will make the Kubelet fail to start if swap is enabled on
     the node.
    :vartype fail_swap_on: bool
    :ivar container_log_max_size_mb: The maximum size (e.g. 10Mi) of container log file before it
     is rotated.
    :vartype container_log_max_size_mb: int
    :ivar container_log_max_files: The maximum number of container log files that can be present
     for a container. The number must be ≥ 2.
    :vartype container_log_max_files: int
    :ivar pod_max_pids: The maximum number of processes per pod.
    :vartype pod_max_pids: int
    """

    _validation = {
        "container_log_max_files": {"minimum": 2},
    }

    _attribute_map = {
        "cpu_manager_policy": {"key": "cpuManagerPolicy", "type": "str"},
        "cpu_cfs_quota": {"key": "cpuCfsQuota", "type": "bool"},
        "cpu_cfs_quota_period": {"key": "cpuCfsQuotaPeriod", "type": "str"},
        "image_gc_high_threshold": {"key": "imageGcHighThreshold", "type": "int"},
        "image_gc_low_threshold": {"key": "imageGcLowThreshold", "type": "int"},
        "topology_manager_policy": {"key": "topologyManagerPolicy", "type": "str"},
        "allowed_unsafe_sysctls": {"key": "allowedUnsafeSysctls", "type": "[str]"},
        "fail_swap_on": {"key": "failSwapOn", "type": "bool"},
        "container_log_max_size_mb": {"key": "containerLogMaxSizeMB", "type": "int"},
        "container_log_max_files": {"key": "containerLogMaxFiles", "type": "int"},
        "pod_max_pids": {"key": "podMaxPids", "type": "int"},
    }

    def __init__(
        self,
        *,
        cpu_manager_policy: Optional[str] = None,
        cpu_cfs_quota: Optional[bool] = None,
        cpu_cfs_quota_period: Optional[str] = None,
        image_gc_high_threshold: Optional[int] = None,
        image_gc_low_threshold: Optional[int] = None,
        topology_manager_policy: Optional[str] = None,
        allowed_unsafe_sysctls: Optional[list[str]] = None,
        fail_swap_on: Optional[bool] = None,
        container_log_max_size_mb: Optional[int] = None,
        container_log_max_files: Optional[int] = None,
        pod_max_pids: Optional[int] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword cpu_manager_policy: The CPU Manager policy to use. The default is 'none'. See
         `Kubernetes CPU management policies
         <https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/>`_
         for more information. Allowed values are 'none' and 'static'.
        :paramtype cpu_manager_policy: str
        :keyword cpu_cfs_quota: If CPU CFS quota enforcement is enabled for containers that specify CPU
         limits. The default is true.
        :paramtype cpu_cfs_quota: bool
        :keyword cpu_cfs_quota_period: The CPU CFS quota period value. The default is '100ms.' Valid
         values are a sequence of decimal numbers with an optional fraction and a unit suffix. For
         example: '300ms', '2h45m'. Supported units are 'ns', 'us', 'ms', 's', 'm', and 'h'.
        :paramtype cpu_cfs_quota_period: str
        :keyword image_gc_high_threshold: The percent of disk usage after which image garbage
         collection is always run. To disable image garbage collection, set to 100. The default is 85%.
        :paramtype image_gc_high_threshold: int
        :keyword image_gc_low_threshold: The percent of disk usage before which image garbage
         collection is never run. This cannot be set higher than imageGcHighThreshold. The default is
         80%.
        :paramtype image_gc_low_threshold: int
        :keyword topology_manager_policy: The Topology Manager policy to use. For more information see
         `Kubernetes Topology Manager
         <https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/>`_. The default is
         'none'. Allowed values are 'none', 'best-effort', 'restricted', and 'single-numa-node'.
        :paramtype topology_manager_policy: str
        :keyword allowed_unsafe_sysctls: Allowed list of unsafe sysctls or unsafe sysctl patterns
         (ending in ``*``\\ ).
        :paramtype allowed_unsafe_sysctls: list[str]
        :keyword fail_swap_on: If set to true it will make the Kubelet fail to start if swap is enabled
         on the node.
        :paramtype fail_swap_on: bool
        :keyword container_log_max_size_mb: The maximum size (e.g. 10Mi) of container log file before
         it is rotated.
        :paramtype container_log_max_size_mb: int
        :keyword container_log_max_files: The maximum number of container log files that can be present
         for a container. The number must be ≥ 2.
        :paramtype container_log_max_files: int
        :keyword pod_max_pids: The maximum number of processes per pod.
        :paramtype pod_max_pids: int
        """
        super().__init__(**kwargs)
        self.cpu_manager_policy = cpu_manager_policy
        self.cpu_cfs_quota = cpu_cfs_quota
        self.cpu_cfs_quota_period = cpu_cfs_quota_period
        self.image_gc_high_threshold = image_gc_high_threshold
        self.image_gc_low_threshold = image_gc_low_threshold
        self.topology_manager_policy = topology_manager_policy
        self.allowed_unsafe_sysctls = allowed_unsafe_sysctls
        self.fail_swap_on = fail_swap_on
        self.container_log_max_size_mb = container_log_max_size_mb
        self.container_log_max_files = container_log_max_files
        self.pod_max_pids = pod_max_pids
+
+
class KubernetesPatchVersion(_serialization.Model):
    """Upgrade information for a single Kubernetes patch version.

    :ivar upgrades: Possible upgrade path for given patch version.
    :vartype upgrades: list[str]
    """

    _attribute_map = {"upgrades": {"key": "upgrades", "type": "[str]"}}

    def __init__(self, *, upgrades: Optional[list[str]] = None, **kwargs: Any) -> None:
        """
        :keyword upgrades: Possible upgrade path for given patch version.
        :paramtype upgrades: list[str]
        """
        super().__init__(**kwargs)
        self.upgrades = upgrades
+
+
class KubernetesVersion(_serialization.Model):
    """Profile for a single major.minor Kubernetes release, including its patch versions.

    :ivar version: major.minor version of Kubernetes release.
    :vartype version: str
    :ivar capabilities: Capabilities on this Kubernetes version.
    :vartype capabilities: ~azure.mgmt.containerservice.models.KubernetesVersionCapabilities
    :ivar is_default: Whether this version is default.
    :vartype is_default: bool
    :ivar is_preview: Whether this version is in preview mode.
    :vartype is_preview: bool
    :ivar patch_versions: Patch versions of Kubernetes release.
    :vartype patch_versions: dict[str, ~azure.mgmt.containerservice.models.KubernetesPatchVersion]
    """

    _attribute_map = {
        "version": {"key": "version", "type": "str"},
        "capabilities": {"key": "capabilities", "type": "KubernetesVersionCapabilities"},
        "is_default": {"key": "isDefault", "type": "bool"},
        "is_preview": {"key": "isPreview", "type": "bool"},
        "patch_versions": {"key": "patchVersions", "type": "{KubernetesPatchVersion}"},
    }

    def __init__(
        self,
        *,
        version: Optional[str] = None,
        capabilities: Optional["_models.KubernetesVersionCapabilities"] = None,
        is_default: Optional[bool] = None,
        is_preview: Optional[bool] = None,
        patch_versions: Optional[dict[str, "_models.KubernetesPatchVersion"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword version: major.minor version of Kubernetes release.
        :paramtype version: str
        :keyword capabilities: Capabilities on this Kubernetes version.
        :paramtype capabilities: ~azure.mgmt.containerservice.models.KubernetesVersionCapabilities
        :keyword is_default: Whether this version is default.
        :paramtype is_default: bool
        :keyword is_preview: Whether this version is in preview mode.
        :paramtype is_preview: bool
        :keyword patch_versions: Patch versions of Kubernetes release.
        :paramtype patch_versions: dict[str,
         ~azure.mgmt.containerservice.models.KubernetesPatchVersion]
        """
        super().__init__(**kwargs)
        self.version = version
        self.capabilities = capabilities
        self.is_default = is_default
        self.is_preview = is_preview
        # Keyed by full patch version string (e.g. "1.29.2").
        self.patch_versions = patch_versions
+
+
class KubernetesVersionCapabilities(_serialization.Model):
    """Capability flags advertised for a Kubernetes version.

    :ivar support_plan:
    :vartype support_plan: list[str or ~azure.mgmt.containerservice.models.KubernetesSupportPlan]
    """

    _attribute_map = {"support_plan": {"key": "supportPlan", "type": "[str]"}}

    def __init__(
        self, *, support_plan: Optional[list[Union[str, "_models.KubernetesSupportPlan"]]] = None, **kwargs: Any
    ) -> None:
        """
        :keyword support_plan:
        :paramtype support_plan: list[str or ~azure.mgmt.containerservice.models.KubernetesSupportPlan]
        """
        super().__init__(**kwargs)
        self.support_plan = support_plan
+
+
class KubernetesVersionListResult(_serialization.Model):
    """List wrapper whose ``values`` property is an array of KubernetesVersion.

    :ivar values: Array of AKS supported Kubernetes versions.
    :vartype values: list[~azure.mgmt.containerservice.models.KubernetesVersion]
    """

    _attribute_map = {"values": {"key": "values", "type": "[KubernetesVersion]"}}

    def __init__(self, *, values: Optional[list["_models.KubernetesVersion"]] = None, **kwargs: Any) -> None:
        """
        :keyword values: Array of AKS supported Kubernetes versions.
        :paramtype values: list[~azure.mgmt.containerservice.models.KubernetesVersion]
        """
        super().__init__(**kwargs)
        self.values = values
+
+
class LinuxOSConfig(_serialization.Model):
    """OS configurations of Linux agent nodes. See `AKS custom node configuration
    <https://learn.microsoft.com/azure/aks/custom-node-configuration>`_ for more details.

    :ivar sysctls: Sysctl settings for Linux agent nodes.
    :vartype sysctls: ~azure.mgmt.containerservice.models.SysctlConfig
    :ivar transparent_huge_page_enabled: Whether transparent hugepages are enabled. Valid values
     are 'always', 'madvise', and 'never'. The default is 'always'. For more information see
     `Transparent Hugepages
     <https://www.kernel.org/doc/html/latest/admin-guide/mm/transhuge.html>`_.
    :vartype transparent_huge_page_enabled: str
    :ivar transparent_huge_page_defrag: Whether the kernel should make aggressive use of memory
     compaction to make more hugepages available. Valid values are 'always', 'defer',
     'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see
     `Transparent Hugepages
     <https://www.kernel.org/doc/html/latest/admin-guide/mm/transhuge.html>`_.
    :vartype transparent_huge_page_defrag: str
    :ivar swap_file_size_mb: The size in MB of a swap file that will be created on each node.
    :vartype swap_file_size_mb: int
    """

    _attribute_map = {
        "sysctls": {"key": "sysctls", "type": "SysctlConfig"},
        "transparent_huge_page_enabled": {"key": "transparentHugePageEnabled", "type": "str"},
        "transparent_huge_page_defrag": {"key": "transparentHugePageDefrag", "type": "str"},
        "swap_file_size_mb": {"key": "swapFileSizeMB", "type": "int"},
    }

    def __init__(
        self,
        *,
        sysctls: Optional["_models.SysctlConfig"] = None,
        transparent_huge_page_enabled: Optional[str] = None,
        transparent_huge_page_defrag: Optional[str] = None,
        swap_file_size_mb: Optional[int] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword sysctls: Sysctl settings for Linux agent nodes.
        :paramtype sysctls: ~azure.mgmt.containerservice.models.SysctlConfig
        :keyword transparent_huge_page_enabled: Whether transparent hugepages are enabled. Valid values
         are 'always', 'madvise', and 'never'. The default is 'always'. For more information see
         `Transparent Hugepages
         <https://www.kernel.org/doc/html/latest/admin-guide/mm/transhuge.html>`_.
        :paramtype transparent_huge_page_enabled: str
        :keyword transparent_huge_page_defrag: Whether the kernel should make aggressive use of memory
         compaction to make more hugepages available. Valid values are 'always', 'defer',
         'defer+madvise', 'madvise' and 'never'. The default is 'madvise'. For more information see
         `Transparent Hugepages
         <https://www.kernel.org/doc/html/latest/admin-guide/mm/transhuge.html>`_.
        :paramtype transparent_huge_page_defrag: str
        :keyword swap_file_size_mb: The size in MB of a swap file that will be created on each node.
        :paramtype swap_file_size_mb: int
        """
        super().__init__(**kwargs)
        self.sysctls = sysctls
        self.transparent_huge_page_enabled = transparent_huge_page_enabled
        self.transparent_huge_page_defrag = transparent_huge_page_defrag
        self.swap_file_size_mb = swap_file_size_mb
+
+
class LocalDNSOverride(_serialization.Model):
    """Overrides for localDNS profile.

    :ivar query_logging: Log level for DNS queries in localDNS. Known values are: "Error" and
     "Log".
    :vartype query_logging: str or ~azure.mgmt.containerservice.models.LocalDNSQueryLogging
    :ivar protocol: Enforce TCP or prefer UDP protocol for connections from localDNS to upstream
     DNS server. Known values are: "PreferUDP" and "ForceTCP".
    :vartype protocol: str or ~azure.mgmt.containerservice.models.LocalDNSProtocol
    :ivar forward_destination: Destination server for DNS queries to be forwarded from localDNS.
     Known values are: "ClusterCoreDNS" and "VnetDNS".
    :vartype forward_destination: str or
     ~azure.mgmt.containerservice.models.LocalDNSForwardDestination
    :ivar forward_policy: Forward policy for selecting upstream DNS server. See `forward plugin
     <https://coredns.io/plugins/forward>`_ for more information. Known values are: "Sequential",
     "RoundRobin", and "Random".
    :vartype forward_policy: str or ~azure.mgmt.containerservice.models.LocalDNSForwardPolicy
    :ivar max_concurrent: Maximum number of concurrent queries. See `forward plugin
     <https://coredns.io/plugins/forward>`_ for more information.
    :vartype max_concurrent: int
    :ivar cache_duration_in_seconds: Cache max TTL in seconds. See `cache plugin
     <https://coredns.io/plugins/cache>`_ for more information.
    :vartype cache_duration_in_seconds: int
    :ivar serve_stale_duration_in_seconds: Serve stale duration in seconds. See `cache plugin
     <https://coredns.io/plugins/cache>`_ for more information.
    :vartype serve_stale_duration_in_seconds: int
    :ivar serve_stale: Policy for serving stale data. See `cache plugin
     <https://coredns.io/plugins/cache>`_ for more information. Known values are: "Verify",
     "Immediate", and "Disable".
    :vartype serve_stale: str or ~azure.mgmt.containerservice.models.LocalDNSServeStale
    """

    _attribute_map = {
        "query_logging": {"key": "queryLogging", "type": "str"},
        "protocol": {"key": "protocol", "type": "str"},
        "forward_destination": {"key": "forwardDestination", "type": "str"},
        "forward_policy": {"key": "forwardPolicy", "type": "str"},
        "max_concurrent": {"key": "maxConcurrent", "type": "int"},
        "cache_duration_in_seconds": {"key": "cacheDurationInSeconds", "type": "int"},
        "serve_stale_duration_in_seconds": {"key": "serveStaleDurationInSeconds", "type": "int"},
        "serve_stale": {"key": "serveStale", "type": "str"},
    }

    # NOTE: unlike most models here, these keywords carry concrete client-side defaults
    # (e.g. max_concurrent=1000) rather than None, so an empty override is fully populated.
    def __init__(
        self,
        *,
        query_logging: Union[str, "_models.LocalDNSQueryLogging"] = "Error",
        protocol: Union[str, "_models.LocalDNSProtocol"] = "PreferUDP",
        forward_destination: Union[str, "_models.LocalDNSForwardDestination"] = "ClusterCoreDNS",
        forward_policy: Union[str, "_models.LocalDNSForwardPolicy"] = "Sequential",
        max_concurrent: int = 1000,
        cache_duration_in_seconds: int = 3600,
        serve_stale_duration_in_seconds: int = 3600,
        serve_stale: Union[str, "_models.LocalDNSServeStale"] = "Immediate",
        **kwargs: Any
    ) -> None:
        """
        :keyword query_logging: Log level for DNS queries in localDNS. Known values are: "Error" and
         "Log".
        :paramtype query_logging: str or ~azure.mgmt.containerservice.models.LocalDNSQueryLogging
        :keyword protocol: Enforce TCP or prefer UDP protocol for connections from localDNS to upstream
         DNS server. Known values are: "PreferUDP" and "ForceTCP".
        :paramtype protocol: str or ~azure.mgmt.containerservice.models.LocalDNSProtocol
        :keyword forward_destination: Destination server for DNS queries to be forwarded from localDNS.
         Known values are: "ClusterCoreDNS" and "VnetDNS".
        :paramtype forward_destination: str or
         ~azure.mgmt.containerservice.models.LocalDNSForwardDestination
        :keyword forward_policy: Forward policy for selecting upstream DNS server. See `forward plugin
         <https://coredns.io/plugins/forward>`_ for more information. Known values are: "Sequential",
         "RoundRobin", and "Random".
        :paramtype forward_policy: str or ~azure.mgmt.containerservice.models.LocalDNSForwardPolicy
        :keyword max_concurrent: Maximum number of concurrent queries. See `forward plugin
         <https://coredns.io/plugins/forward>`_ for more information.
        :paramtype max_concurrent: int
        :keyword cache_duration_in_seconds: Cache max TTL in seconds. See `cache plugin
         <https://coredns.io/plugins/cache>`_ for more information.
        :paramtype cache_duration_in_seconds: int
        :keyword serve_stale_duration_in_seconds: Serve stale duration in seconds. See `cache plugin
         <https://coredns.io/plugins/cache>`_ for more information.
        :paramtype serve_stale_duration_in_seconds: int
        :keyword serve_stale: Policy for serving stale data. See `cache plugin
         <https://coredns.io/plugins/cache>`_ for more information. Known values are: "Verify",
         "Immediate", and "Disable".
        :paramtype serve_stale: str or ~azure.mgmt.containerservice.models.LocalDNSServeStale
        """
        super().__init__(**kwargs)
        self.query_logging = query_logging
        self.protocol = protocol
        self.forward_destination = forward_destination
        self.forward_policy = forward_policy
        self.max_concurrent = max_concurrent
        self.cache_duration_in_seconds = cache_duration_in_seconds
        self.serve_stale_duration_in_seconds = serve_stale_duration_in_seconds
        self.serve_stale = serve_stale
+
+
+class LocalDNSProfile(_serialization.Model):
+    """Configures the per-node local DNS, with VnetDNS and KubeDNS overrides. LocalDNS helps improve
+    performance and reliability of DNS resolution in an AKS cluster. For more details see
+    aka.ms/aks/localdns.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar mode: Mode of enablement for localDNS. Known values are: "Preferred", "Required", and
+     "Disabled".
+    :vartype mode: str or ~azure.mgmt.containerservice.models.LocalDNSMode
+    :ivar state: System-generated state of localDNS. Known values are: "Enabled" and "Disabled".
+    :vartype state: str or ~azure.mgmt.containerservice.models.LocalDNSState
+    :ivar vnet_dns_overrides: VnetDNS overrides apply to DNS traffic from pods with
+     dnsPolicy:default or kubelet (referred to as VnetDNS traffic).
+    :vartype vnet_dns_overrides: dict[str, ~azure.mgmt.containerservice.models.LocalDNSOverride]
+    :ivar kube_dns_overrides: KubeDNS overrides apply to DNS traffic from pods with
+     dnsPolicy:ClusterFirst (referred to as KubeDNS traffic).
+    :vartype kube_dns_overrides: dict[str, ~azure.mgmt.containerservice.models.LocalDNSOverride]
+    """
+
+    # Fields marked readonly are populated by the service and omitted from request payloads.
+    _validation = {
+        "state": {"readonly": True},
+    }
+
+    # Maps each Python attribute to its REST wire key and serializer type string.
+    _attribute_map = {
+        "mode": {"key": "mode", "type": "str"},
+        "state": {"key": "state", "type": "str"},
+        "vnet_dns_overrides": {"key": "vnetDNSOverrides", "type": "{LocalDNSOverride}"},
+        "kube_dns_overrides": {"key": "kubeDNSOverrides", "type": "{LocalDNSOverride}"},
+    }
+
+    def __init__(
+        self,
+        *,
+        mode: Union[str, "_models.LocalDNSMode"] = "Preferred",
+        vnet_dns_overrides: Optional[dict[str, "_models.LocalDNSOverride"]] = None,
+        kube_dns_overrides: Optional[dict[str, "_models.LocalDNSOverride"]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword mode: Mode of enablement for localDNS. Known values are: "Preferred", "Required", and
+         "Disabled".
+        :paramtype mode: str or ~azure.mgmt.containerservice.models.LocalDNSMode
+        :keyword vnet_dns_overrides: VnetDNS overrides apply to DNS traffic from pods with
+         dnsPolicy:default or kubelet (referred to as VnetDNS traffic).
+        :paramtype vnet_dns_overrides: dict[str, ~azure.mgmt.containerservice.models.LocalDNSOverride]
+        :keyword kube_dns_overrides: KubeDNS overrides apply to DNS traffic from pods with
+         dnsPolicy:ClusterFirst (referred to as KubeDNS traffic).
+        :paramtype kube_dns_overrides: dict[str, ~azure.mgmt.containerservice.models.LocalDNSOverride]
+        """
+        super().__init__(**kwargs)
+        self.mode = mode
+        # Server-populated; always None on a client-constructed instance.
+        self.state: Optional[Union[str, "_models.LocalDNSState"]] = None
+        self.vnet_dns_overrides = vnet_dns_overrides
+        self.kube_dns_overrides = kube_dns_overrides
+
+
+class Machine(SubResource):
+    """A machine. Contains details about the underlying virtual machine. A machine may be visible here
+    but not in kubectl get nodes; if so it may be because the machine has not been registered with
+    the Kubernetes API Server yet.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar id: Resource ID.
+    :vartype id: str
+    :ivar name: The name of the resource that is unique within a resource group. This name can be
+     used to access the resource.
+    :vartype name: str
+    :ivar type: Resource type.
+    :vartype type: str
+    :ivar zones: The Availability zone in which machine is located.
+    :vartype zones: list[str]
+    :ivar properties: The properties of the machine.
+    :vartype properties: ~azure.mgmt.containerservice.models.MachineProperties
+    """
+
+    # Every field is server-populated; this model is read-only from the client's perspective.
+    _validation = {
+        "id": {"readonly": True},
+        "name": {"readonly": True},
+        "type": {"readonly": True},
+        "zones": {"readonly": True},
+        "properties": {"readonly": True},
+    }
+
+    # Attribute -> REST wire key and serializer type string.
+    _attribute_map = {
+        "id": {"key": "id", "type": "str"},
+        "name": {"key": "name", "type": "str"},
+        "type": {"key": "type", "type": "str"},
+        "zones": {"key": "zones", "type": "[str]"},
+        "properties": {"key": "properties", "type": "MachineProperties"},
+    }
+
+    def __init__(self, **kwargs: Any) -> None:
+        """ """
+        super().__init__(**kwargs)
+        # Server-populated fields; remain None until deserialized from a response.
+        self.zones: Optional[list[str]] = None
+        self.properties: Optional["_models.MachineProperties"] = None
+
+
+class MachineIpAddress(_serialization.Model):
+    """The machine IP address details.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar family: To determine if address belongs IPv4 or IPv6 family. Known values are: "IPv4" and
+     "IPv6".
+    :vartype family: str or ~azure.mgmt.containerservice.models.IpFamily
+    :ivar ip: IPv4 or IPv6 address of the machine.
+    :vartype ip: str
+    """
+
+    # Both fields are server-populated and never sent in requests.
+    _validation = {
+        "family": {"readonly": True},
+        "ip": {"readonly": True},
+    }
+
+    # Attribute -> REST wire key and serializer type string.
+    _attribute_map = {
+        "family": {"key": "family", "type": "str"},
+        "ip": {"key": "ip", "type": "str"},
+    }
+
+    def __init__(self, **kwargs: Any) -> None:
+        """ """
+        super().__init__(**kwargs)
+        # Server-populated; None until deserialized from a response.
+        self.family: Optional[Union[str, "_models.IpFamily"]] = None
+        self.ip: Optional[str] = None
+
+
+class MachineListResult(_serialization.Model):
+    """The response from the List Machines operation.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar next_link: The URL to get the next set of machine results.
+    :vartype next_link: str
+    :ivar value: The list of Machines in cluster.
+    :vartype value: list[~azure.mgmt.containerservice.models.Machine]
+    """
+
+    # next_link is a server-provided pagination cursor; never sent in requests.
+    _validation = {
+        "next_link": {"readonly": True},
+    }
+
+    # Attribute -> REST wire key and serializer type string.
+    _attribute_map = {
+        "next_link": {"key": "nextLink", "type": "str"},
+        "value": {"key": "value", "type": "[Machine]"},
+    }
+
+    def __init__(self, *, value: Optional[list["_models.Machine"]] = None, **kwargs: Any) -> None:
+        """
+        :keyword value: The list of Machines in cluster.
+        :paramtype value: list[~azure.mgmt.containerservice.models.Machine]
+        """
+        super().__init__(**kwargs)
+        # Server-populated pagination link; None on client-constructed instances.
+        self.next_link: Optional[str] = None
+        self.value = value
+
+
+class MachineNetworkProperties(_serialization.Model):
+    """network properties of the machine.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar ip_addresses: IPv4, IPv6 addresses of the machine.
+    :vartype ip_addresses: list[~azure.mgmt.containerservice.models.MachineIpAddress]
+    """
+
+    # The single field is server-populated and dropped from request payloads.
+    _validation = {
+        "ip_addresses": {"readonly": True},
+    }
+
+    # Attribute -> REST wire key and serializer type string.
+    _attribute_map = {
+        "ip_addresses": {"key": "ipAddresses", "type": "[MachineIpAddress]"},
+    }
+
+    def __init__(self, **kwargs: Any) -> None:
+        """ """
+        super().__init__(**kwargs)
+        # Server-populated; None until deserialized from a response.
+        self.ip_addresses: Optional[list["_models.MachineIpAddress"]] = None
+
+
+class MachineProperties(_serialization.Model):
+    """The properties of the machine.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar network: network properties of the machine.
+    :vartype network: ~azure.mgmt.containerservice.models.MachineNetworkProperties
+    :ivar resource_id: Azure resource id of the machine. It can be used to GET underlying VM
+     Instance.
+    :vartype resource_id: str
+    """
+
+    # Both fields are server-populated and never sent in requests.
+    _validation = {
+        "network": {"readonly": True},
+        "resource_id": {"readonly": True},
+    }
+
+    # Attribute -> REST wire key and serializer type string.
+    _attribute_map = {
+        "network": {"key": "network", "type": "MachineNetworkProperties"},
+        "resource_id": {"key": "resourceId", "type": "str"},
+    }
+
+    def __init__(self, **kwargs: Any) -> None:
+        """ """
+        super().__init__(**kwargs)
+        # Server-populated; None until deserialized from a response.
+        self.network: Optional["_models.MachineNetworkProperties"] = None
+        self.resource_id: Optional[str] = None
+
+
+class MaintenanceConfiguration(SubResource):
+    """Planned maintenance configuration, used to configure when updates can be deployed to a Managed
+    Cluster. See `planned maintenance
+    <https://docs.microsoft.com/azure/aks/planned-maintenance>`_
+    for more information about planned maintenance.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar id: Resource ID.
+    :vartype id: str
+    :ivar name: The name of the resource that is unique within a resource group. This name can be
+     used to access the resource.
+    :vartype name: str
+    :ivar type: Resource type.
+    :vartype type: str
+    :ivar system_data: The system metadata relating to this resource.
+    :vartype system_data: ~azure.mgmt.containerservice.models.SystemData
+    :ivar time_in_week: Time slots during the week when planned maintenance is allowed to proceed.
+     If two array entries specify the same day of the week, the applied configuration is the union
+     of times in both entries.
+    :vartype time_in_week: list[~azure.mgmt.containerservice.models.TimeInWeek]
+    :ivar not_allowed_time: Time slots on which upgrade is not allowed.
+    :vartype not_allowed_time: list[~azure.mgmt.containerservice.models.TimeSpan]
+    :ivar maintenance_window: Maintenance window for the maintenance configuration.
+    :vartype maintenance_window: ~azure.mgmt.containerservice.models.MaintenanceWindow
+    """
+
+    # ARM identity/metadata fields are server-populated and omitted from request payloads.
+    _validation = {
+        "id": {"readonly": True},
+        "name": {"readonly": True},
+        "type": {"readonly": True},
+        "system_data": {"readonly": True},
+    }
+
+    # Attribute -> REST wire key (note the flattened "properties." prefix) and serializer type.
+    _attribute_map = {
+        "id": {"key": "id", "type": "str"},
+        "name": {"key": "name", "type": "str"},
+        "type": {"key": "type", "type": "str"},
+        "system_data": {"key": "systemData", "type": "SystemData"},
+        "time_in_week": {"key": "properties.timeInWeek", "type": "[TimeInWeek]"},
+        "not_allowed_time": {"key": "properties.notAllowedTime", "type": "[TimeSpan]"},
+        "maintenance_window": {"key": "properties.maintenanceWindow", "type": "MaintenanceWindow"},
+    }
+
+    def __init__(
+        self,
+        *,
+        time_in_week: Optional[list["_models.TimeInWeek"]] = None,
+        not_allowed_time: Optional[list["_models.TimeSpan"]] = None,
+        maintenance_window: Optional["_models.MaintenanceWindow"] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword time_in_week: Time slots during the week when planned maintenance is allowed to
+         proceed. If two array entries specify the same day of the week, the applied configuration is
+         the union of times in both entries.
+        :paramtype time_in_week: list[~azure.mgmt.containerservice.models.TimeInWeek]
+        :keyword not_allowed_time: Time slots on which upgrade is not allowed.
+        :paramtype not_allowed_time: list[~azure.mgmt.containerservice.models.TimeSpan]
+        :keyword maintenance_window: Maintenance window for the maintenance configuration.
+        :paramtype maintenance_window: ~azure.mgmt.containerservice.models.MaintenanceWindow
+        """
+        super().__init__(**kwargs)
+        # Server-populated; None on client-constructed instances.
+        self.system_data: Optional["_models.SystemData"] = None
+        self.time_in_week = time_in_week
+        self.not_allowed_time = not_allowed_time
+        self.maintenance_window = maintenance_window
+
+
+class MaintenanceConfigurationListResult(_serialization.Model):
+    """The response from the List maintenance configurations operation.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar value: The list of maintenance configurations.
+    :vartype value: list[~azure.mgmt.containerservice.models.MaintenanceConfiguration]
+    :ivar next_link: The URL to get the next set of maintenance configuration results.
+    :vartype next_link: str
+    """
+
+    # next_link is a server-provided pagination cursor; never sent in requests.
+    _validation = {
+        "next_link": {"readonly": True},
+    }
+
+    # Attribute -> REST wire key and serializer type string.
+    _attribute_map = {
+        "value": {"key": "value", "type": "[MaintenanceConfiguration]"},
+        "next_link": {"key": "nextLink", "type": "str"},
+    }
+
+    def __init__(self, *, value: Optional[list["_models.MaintenanceConfiguration"]] = None, **kwargs: Any) -> None:
+        """
+        :keyword value: The list of maintenance configurations.
+        :paramtype value: list[~azure.mgmt.containerservice.models.MaintenanceConfiguration]
+        """
+        super().__init__(**kwargs)
+        self.value = value
+        # Server-populated pagination link; None on client-constructed instances.
+        self.next_link: Optional[str] = None
+
+
+class MaintenanceWindow(_serialization.Model):
+    """Maintenance window used to configure scheduled auto-upgrade for a Managed Cluster.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar schedule: Recurrence schedule for the maintenance window. Required.
+    :vartype schedule: ~azure.mgmt.containerservice.models.Schedule
+    :ivar duration_hours: Length of maintenance window range from 4 to 24 hours.
+    :vartype duration_hours: int
+    :ivar utc_offset: The UTC offset in format +/-HH:mm. For example, '+05:30' for IST and '-07:00'
+     for PST. If not specified, the default is '+00:00'.
+    :vartype utc_offset: str
+    :ivar start_date: The date the maintenance window activates. If the current date is before this
+     date, the maintenance window is inactive and will not be used for upgrades. If not specified,
+     the maintenance window will be active right away.
+    :vartype start_date: ~datetime.date
+    :ivar start_time: The start time of the maintenance window. Accepted values are from '00:00' to
+     '23:59'. 'utcOffset' applies to this field. For example: '02:00' with 'utcOffset: +02:00' means
+     UTC time '00:00'. Required.
+    :vartype start_time: str
+    :ivar not_allowed_dates: Date ranges on which upgrade is not allowed. 'utcOffset' applies to
+     this field. For example, with 'utcOffset: +02:00' and 'dateSpan' being '2022-12-23' to
+     '2023-01-03', maintenance will be blocked from '2022-12-22 22:00' to '2023-01-03 22:00' in UTC
+     time.
+    :vartype not_allowed_dates: list[~azure.mgmt.containerservice.models.DateSpan]
+    """
+
+    # Client-side validation applied at serialization time:
+    #   duration_hours must be within [4, 24]; utc_offset must match "+HH:MM"/"-HH:MM";
+    #   start_time must match "HH:MM".
+    _validation = {
+        "schedule": {"required": True},
+        "duration_hours": {"required": True, "maximum": 24, "minimum": 4},
+        "utc_offset": {"pattern": r"^(-|\+)[0-9]{2}:[0-9]{2}$"},
+        "start_time": {"required": True, "pattern": r"^\d{2}:\d{2}$"},
+    }
+
+    # Attribute -> REST wire key and serializer type string.
+    _attribute_map = {
+        "schedule": {"key": "schedule", "type": "Schedule"},
+        "duration_hours": {"key": "durationHours", "type": "int"},
+        "utc_offset": {"key": "utcOffset", "type": "str"},
+        "start_date": {"key": "startDate", "type": "date"},
+        "start_time": {"key": "startTime", "type": "str"},
+        "not_allowed_dates": {"key": "notAllowedDates", "type": "[DateSpan]"},
+    }
+
+    # NOTE: a keyword-only parameter without a default (start_time) may legally follow
+    # one with a default (duration_hours) after the bare "*".
+    def __init__(
+        self,
+        *,
+        schedule: "_models.Schedule",
+        duration_hours: int = 24,
+        start_time: str,
+        utc_offset: Optional[str] = None,
+        start_date: Optional[datetime.date] = None,
+        not_allowed_dates: Optional[list["_models.DateSpan"]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword schedule: Recurrence schedule for the maintenance window. Required.
+        :paramtype schedule: ~azure.mgmt.containerservice.models.Schedule
+        :keyword duration_hours: Length of maintenance window range from 4 to 24 hours.
+        :paramtype duration_hours: int
+        :keyword utc_offset: The UTC offset in format +/-HH:mm. For example, '+05:30' for IST and
+         '-07:00' for PST. If not specified, the default is '+00:00'.
+        :paramtype utc_offset: str
+        :keyword start_date: The date the maintenance window activates. If the current date is before
+         this date, the maintenance window is inactive and will not be used for upgrades. If not
+         specified, the maintenance window will be active right away.
+        :paramtype start_date: ~datetime.date
+        :keyword start_time: The start time of the maintenance window. Accepted values are from '00:00'
+         to '23:59'. 'utcOffset' applies to this field. For example: '02:00' with 'utcOffset: +02:00'
+         means UTC time '00:00'. Required.
+        :paramtype start_time: str
+        :keyword not_allowed_dates: Date ranges on which upgrade is not allowed. 'utcOffset' applies to
+         this field. For example, with 'utcOffset: +02:00' and 'dateSpan' being '2022-12-23' to
+         '2023-01-03', maintenance will be blocked from '2022-12-22 22:00' to '2023-01-03 22:00' in UTC
+         time.
+        :paramtype not_allowed_dates: list[~azure.mgmt.containerservice.models.DateSpan]
+        """
+        super().__init__(**kwargs)
+        self.schedule = schedule
+        self.duration_hours = duration_hours
+        self.utc_offset = utc_offset
+        self.start_date = start_date
+        self.start_time = start_time
+        self.not_allowed_dates = not_allowed_dates
+
+
+class Resource(_serialization.Model):
+    """Common fields that are returned in the response for all Azure Resource Manager resources.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar id: Fully qualified resource ID for the resource. E.g.
+     "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}".
+    :vartype id: str
+    :ivar name: The name of the resource.
+    :vartype name: str
+    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+     "Microsoft.Storage/storageAccounts".
+    :vartype type: str
+    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+     information.
+    :vartype system_data: ~azure.mgmt.containerservice.models.SystemData
+    """
+
+    # All ARM envelope fields are server-populated; this base model is read-only client-side.
+    _validation = {
+        "id": {"readonly": True},
+        "name": {"readonly": True},
+        "type": {"readonly": True},
+        "system_data": {"readonly": True},
+    }
+
+    # Attribute -> REST wire key and serializer type string.
+    _attribute_map = {
+        "id": {"key": "id", "type": "str"},
+        "name": {"key": "name", "type": "str"},
+        "type": {"key": "type", "type": "str"},
+        "system_data": {"key": "systemData", "type": "SystemData"},
+    }
+
+    def __init__(self, **kwargs: Any) -> None:
+        """ """
+        super().__init__(**kwargs)
+        # Server-populated fields; None until deserialized from a response.
+        # "id" and "type" intentionally shadow builtins to mirror the wire contract.
+        self.id: Optional[str] = None
+        self.name: Optional[str] = None
+        self.type: Optional[str] = None
+        self.system_data: Optional["_models.SystemData"] = None
+
+
+class TrackedResource(Resource):
+    """The resource model definition for an Azure Resource Manager tracked top level resource which
+    has 'tags' and a 'location'.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar id: Fully qualified resource ID for the resource. E.g.
+     "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}".
+    :vartype id: str
+    :ivar name: The name of the resource.
+    :vartype name: str
+    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+     "Microsoft.Storage/storageAccounts".
+    :vartype type: str
+    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+     information.
+    :vartype system_data: ~azure.mgmt.containerservice.models.SystemData
+    :ivar tags: Resource tags.
+    :vartype tags: dict[str, str]
+    :ivar location: The geo-location where the resource lives. Required.
+    :vartype location: str
+    """
+
+    # Inherited ARM envelope fields stay readonly; "location" is the only required input.
+    _validation = {
+        "id": {"readonly": True},
+        "name": {"readonly": True},
+        "type": {"readonly": True},
+        "system_data": {"readonly": True},
+        "location": {"required": True},
+    }
+
+    # Attribute -> REST wire key and serializer type string.
+    _attribute_map = {
+        "id": {"key": "id", "type": "str"},
+        "name": {"key": "name", "type": "str"},
+        "type": {"key": "type", "type": "str"},
+        "system_data": {"key": "systemData", "type": "SystemData"},
+        "tags": {"key": "tags", "type": "{str}"},
+        "location": {"key": "location", "type": "str"},
+    }
+
+    def __init__(self, *, location: str, tags: Optional[dict[str, str]] = None, **kwargs: Any) -> None:
+        """
+        :keyword tags: Resource tags.
+        :paramtype tags: dict[str, str]
+        :keyword location: The geo-location where the resource lives. Required.
+        :paramtype location: str
+        """
+        super().__init__(**kwargs)
+        self.tags = tags
+        self.location = location
+
+
+class ManagedCluster(TrackedResource):
+ """Managed cluster.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar id: Fully qualified resource ID for the resource. E.g.
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}".
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.containerservice.models.SystemData
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
+ :ivar location: The geo-location where the resource lives. Required.
+ :vartype location: str
+ :ivar e_tag: Unique read-only string used to implement optimistic concurrency. The eTag value
+ will change when the resource is updated. Specify an if-match or if-none-match header with the
+ eTag value for a subsequent request to enable optimistic concurrency per the normal eTag
+ convention.
+ :vartype e_tag: str
+ :ivar sku: The managed cluster SKU.
+ :vartype sku: ~azure.mgmt.containerservice.models.ManagedClusterSKU
+ :ivar extended_location: The extended location of the Virtual Machine.
+ :vartype extended_location: ~azure.mgmt.containerservice.models.ExtendedLocation
+ :ivar identity: The identity of the managed cluster, if configured.
+ :vartype identity: ~azure.mgmt.containerservice.models.ManagedClusterIdentity
+ :ivar kind: This is primarily used to expose different UI experiences in the portal for
+ different kinds.
+ :vartype kind: str
+ :ivar provisioning_state: The current provisioning state.
+ :vartype provisioning_state: str
+ :ivar power_state: The Power State of the cluster.
+ :vartype power_state: ~azure.mgmt.containerservice.models.PowerState
+ :ivar max_agent_pools: The max number of agent pools for the managed cluster.
+ :vartype max_agent_pools: int
+ :ivar kubernetes_version: The version of Kubernetes specified by the user. Both patch version
+ (e.g. 1.20.13) and (e.g. 1.20) are supported. When
+ is specified, the latest supported GA patch version is chosen automatically.
+ Updating the cluster with the same once it has been created (e.g. 1.14.x -> 1.14)
+ will not trigger an upgrade, even if a newer patch version is available. When you upgrade a
+ supported AKS cluster, Kubernetes minor versions cannot be skipped. All upgrades must be
+ performed sequentially by major version number. For example, upgrades between 1.14.x -> 1.15.x
+ or 1.15.x -> 1.16.x are allowed, however 1.14.x -> 1.16.x is not allowed. See `upgrading an AKS
+ cluster `_ for more details.
+ :vartype kubernetes_version: str
+ :ivar current_kubernetes_version: The version of Kubernetes the Managed Cluster is running. If
+ kubernetesVersion was a fully specified version , this field will be exactly
+ equal to it. If kubernetesVersion was , this field will contain the full
+ version being used.
+ :vartype current_kubernetes_version: str
+ :ivar dns_prefix: The DNS prefix of the Managed Cluster. This cannot be updated once the
+ Managed Cluster has been created.
+ :vartype dns_prefix: str
+ :ivar fqdn_subdomain: The FQDN subdomain of the private cluster with custom private dns zone.
+ This cannot be updated once the Managed Cluster has been created.
+ :vartype fqdn_subdomain: str
+ :ivar fqdn: The FQDN of the master pool.
+ :vartype fqdn: str
+ :ivar private_fqdn: The FQDN of private cluster.
+ :vartype private_fqdn: str
+ :ivar azure_portal_fqdn: The special FQDN used by the Azure Portal to access the Managed
+ Cluster. This FQDN is for use only by the Azure Portal and should not be used by other clients.
+ The Azure Portal requires certain Cross-Origin Resource Sharing (CORS) headers to be sent in
+ some responses, which Kubernetes APIServer doesn't handle by default. This special FQDN
+ supports CORS, allowing the Azure Portal to function properly.
+ :vartype azure_portal_fqdn: str
+ :ivar agent_pool_profiles: The agent pool properties.
+ :vartype agent_pool_profiles:
+ list[~azure.mgmt.containerservice.models.ManagedClusterAgentPoolProfile]
+ :ivar linux_profile: The profile for Linux VMs in the Managed Cluster.
+ :vartype linux_profile: ~azure.mgmt.containerservice.models.ContainerServiceLinuxProfile
+ :ivar windows_profile: The profile for Windows VMs in the Managed Cluster.
+ :vartype windows_profile: ~azure.mgmt.containerservice.models.ManagedClusterWindowsProfile
+ :ivar service_principal_profile: Information about a service principal identity for the cluster
+ to use for manipulating Azure APIs.
+ :vartype service_principal_profile:
+ ~azure.mgmt.containerservice.models.ManagedClusterServicePrincipalProfile
+ :ivar addon_profiles: The profile of managed cluster add-on.
+ :vartype addon_profiles: dict[str,
+ ~azure.mgmt.containerservice.models.ManagedClusterAddonProfile]
+ :ivar pod_identity_profile: The pod identity profile of the Managed Cluster. See `use AAD pod
+ identity `_ for more details on
+ AAD pod identity integration.
+ :vartype pod_identity_profile:
+ ~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProfile
+ :ivar oidc_issuer_profile: The OIDC issuer profile of the Managed Cluster.
+ :vartype oidc_issuer_profile:
+ ~azure.mgmt.containerservice.models.ManagedClusterOIDCIssuerProfile
+ :ivar node_resource_group: The name of the resource group containing agent pool nodes.
+ :vartype node_resource_group: str
+ :ivar node_resource_group_profile: Profile of the node resource group configuration.
+ :vartype node_resource_group_profile:
+ ~azure.mgmt.containerservice.models.ManagedClusterNodeResourceGroupProfile
+ :ivar enable_rbac: Whether to enable Kubernetes Role-Based Access Control.
+ :vartype enable_rbac: bool
+ :ivar support_plan: The support plan for the Managed Cluster. If unspecified, the default is
+ 'KubernetesOfficial'. Known values are: "KubernetesOfficial" and "AKSLongTermSupport".
+ :vartype support_plan: str or ~azure.mgmt.containerservice.models.KubernetesSupportPlan
+ :ivar network_profile: The network configuration profile.
+ :vartype network_profile: ~azure.mgmt.containerservice.models.ContainerServiceNetworkProfile
+ :ivar aad_profile: The Azure Active Directory configuration.
+ :vartype aad_profile: ~azure.mgmt.containerservice.models.ManagedClusterAADProfile
+ :ivar auto_upgrade_profile: The auto upgrade configuration.
+ :vartype auto_upgrade_profile:
+ ~azure.mgmt.containerservice.models.ManagedClusterAutoUpgradeProfile
+ :ivar upgrade_settings: Settings for upgrading a cluster.
+ :vartype upgrade_settings: ~azure.mgmt.containerservice.models.ClusterUpgradeSettings
+ :ivar auto_scaler_profile: Parameters to be applied to the cluster-autoscaler when enabled.
+ :vartype auto_scaler_profile:
+ ~azure.mgmt.containerservice.models.ManagedClusterPropertiesAutoScalerProfile
+ :ivar api_server_access_profile: The access profile for managed cluster API server.
+ :vartype api_server_access_profile:
+ ~azure.mgmt.containerservice.models.ManagedClusterAPIServerAccessProfile
+ :ivar disk_encryption_set_id: The Resource ID of the disk encryption set to use for enabling
+ encryption at rest. This is of the form:
+ '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{encryptionSetName}'.
+ :vartype disk_encryption_set_id: str
+ :ivar identity_profile: The user identity associated with the managed cluster. This identity
+ will be used by the kubelet. Only one user assigned identity is allowed. The only accepted key
+ is "kubeletidentity", with value of "resourceId":
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}".
+ :vartype identity_profile: dict[str, ~azure.mgmt.containerservice.models.UserAssignedIdentity]
+ :ivar private_link_resources: Private link resources associated with the cluster.
+ :vartype private_link_resources: list[~azure.mgmt.containerservice.models.PrivateLinkResource]
+ :ivar disable_local_accounts: If local accounts should be disabled on the Managed Cluster. If
+ set to true, getting static credentials will be disabled for this cluster. This must only be
+ used on Managed Clusters that are AAD enabled. For more details see `disable local accounts
+ `_.
+ :vartype disable_local_accounts: bool
+ :ivar http_proxy_config: Configurations for provisioning the cluster with HTTP proxy servers.
+ :vartype http_proxy_config: ~azure.mgmt.containerservice.models.ManagedClusterHTTPProxyConfig
+ :ivar security_profile: Security profile for the managed cluster.
+ :vartype security_profile: ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfile
+ :ivar storage_profile: Storage profile for the managed cluster.
+ :vartype storage_profile: ~azure.mgmt.containerservice.models.ManagedClusterStorageProfile
+ :ivar ingress_profile: Ingress profile for the managed cluster.
+ :vartype ingress_profile: ~azure.mgmt.containerservice.models.ManagedClusterIngressProfile
+ :ivar public_network_access: PublicNetworkAccess of the managedCluster. Allow or deny public
+ network access for AKS. Known values are: "Enabled" and "Disabled".
+ :vartype public_network_access: str or ~azure.mgmt.containerservice.models.PublicNetworkAccess
+ :ivar workload_auto_scaler_profile: Workload Auto-scaler profile for the managed cluster.
+ :vartype workload_auto_scaler_profile:
+ ~azure.mgmt.containerservice.models.ManagedClusterWorkloadAutoScalerProfile
+ :ivar azure_monitor_profile: Azure Monitor addon profiles for monitoring the managed cluster.
+ :vartype azure_monitor_profile:
+ ~azure.mgmt.containerservice.models.ManagedClusterAzureMonitorProfile
+ :ivar service_mesh_profile: Service mesh profile for a managed cluster.
+ :vartype service_mesh_profile: ~azure.mgmt.containerservice.models.ServiceMeshProfile
+ :ivar resource_uid: The resourceUID uniquely identifies ManagedClusters that reuse ARM
+ ResourceIds (i.e: create, delete, create sequence).
+ :vartype resource_uid: str
+ :ivar metrics_profile: Optional cluster metrics configuration.
+ :vartype metrics_profile: ~azure.mgmt.containerservice.models.ManagedClusterMetricsProfile
+ :ivar node_provisioning_profile: Node provisioning settings that apply to the whole cluster.
+ :vartype node_provisioning_profile:
+ ~azure.mgmt.containerservice.models.ManagedClusterNodeProvisioningProfile
+ :ivar bootstrap_profile: Profile of the cluster bootstrap configuration.
+ :vartype bootstrap_profile: ~azure.mgmt.containerservice.models.ManagedClusterBootstrapProfile
+ :ivar ai_toolchain_operator_profile: AI toolchain operator settings that apply to the whole
+ cluster.
+ :vartype ai_toolchain_operator_profile:
+ ~azure.mgmt.containerservice.models.ManagedClusterAIToolchainOperatorProfile
+ :ivar status: Contains read-only information about the Managed Cluster.
+ :vartype status: ~azure.mgmt.containerservice.models.ManagedClusterStatus
+ """
+
+    # Properties marked readonly are populated by the service on responses and
+    # are rejected if sent on a request; "location" is the only field the
+    # client is required to supply.
+    _validation = {
+        "id": {"readonly": True},
+        "name": {"readonly": True},
+        "type": {"readonly": True},
+        "system_data": {"readonly": True},
+        "location": {"required": True},
+        "e_tag": {"readonly": True},
+        "provisioning_state": {"readonly": True},
+        "power_state": {"readonly": True},
+        "max_agent_pools": {"readonly": True},
+        "current_kubernetes_version": {"readonly": True},
+        "fqdn": {"readonly": True},
+        "private_fqdn": {"readonly": True},
+        "azure_portal_fqdn": {"readonly": True},
+        "resource_uid": {"readonly": True},
+    }
+
+    # Maps each Python attribute to its JSON wire key and msrest type string.
+    # Keys prefixed with "properties." are nested inside the ARM resource's
+    # "properties" envelope during (de)serialization.
+    _attribute_map = {
+        "id": {"key": "id", "type": "str"},
+        "name": {"key": "name", "type": "str"},
+        "type": {"key": "type", "type": "str"},
+        "system_data": {"key": "systemData", "type": "SystemData"},
+        "tags": {"key": "tags", "type": "{str}"},
+        "location": {"key": "location", "type": "str"},
+        "e_tag": {"key": "eTag", "type": "str"},
+        "sku": {"key": "sku", "type": "ManagedClusterSKU"},
+        "extended_location": {"key": "extendedLocation", "type": "ExtendedLocation"},
+        "identity": {"key": "identity", "type": "ManagedClusterIdentity"},
+        "kind": {"key": "kind", "type": "str"},
+        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
+        "power_state": {"key": "properties.powerState", "type": "PowerState"},
+        "max_agent_pools": {"key": "properties.maxAgentPools", "type": "int"},
+        "kubernetes_version": {"key": "properties.kubernetesVersion", "type": "str"},
+        "current_kubernetes_version": {"key": "properties.currentKubernetesVersion", "type": "str"},
+        "dns_prefix": {"key": "properties.dnsPrefix", "type": "str"},
+        "fqdn_subdomain": {"key": "properties.fqdnSubdomain", "type": "str"},
+        "fqdn": {"key": "properties.fqdn", "type": "str"},
+        "private_fqdn": {"key": "properties.privateFQDN", "type": "str"},
+        "azure_portal_fqdn": {"key": "properties.azurePortalFQDN", "type": "str"},
+        "agent_pool_profiles": {"key": "properties.agentPoolProfiles", "type": "[ManagedClusterAgentPoolProfile]"},
+        "linux_profile": {"key": "properties.linuxProfile", "type": "ContainerServiceLinuxProfile"},
+        "windows_profile": {"key": "properties.windowsProfile", "type": "ManagedClusterWindowsProfile"},
+        "service_principal_profile": {
+            "key": "properties.servicePrincipalProfile",
+            "type": "ManagedClusterServicePrincipalProfile",
+        },
+        "addon_profiles": {"key": "properties.addonProfiles", "type": "{ManagedClusterAddonProfile}"},
+        "pod_identity_profile": {"key": "properties.podIdentityProfile", "type": "ManagedClusterPodIdentityProfile"},
+        "oidc_issuer_profile": {"key": "properties.oidcIssuerProfile", "type": "ManagedClusterOIDCIssuerProfile"},
+        "node_resource_group": {"key": "properties.nodeResourceGroup", "type": "str"},
+        "node_resource_group_profile": {
+            "key": "properties.nodeResourceGroupProfile",
+            "type": "ManagedClusterNodeResourceGroupProfile",
+        },
+        "enable_rbac": {"key": "properties.enableRBAC", "type": "bool"},
+        "support_plan": {"key": "properties.supportPlan", "type": "str"},
+        "network_profile": {"key": "properties.networkProfile", "type": "ContainerServiceNetworkProfile"},
+        "aad_profile": {"key": "properties.aadProfile", "type": "ManagedClusterAADProfile"},
+        "auto_upgrade_profile": {"key": "properties.autoUpgradeProfile", "type": "ManagedClusterAutoUpgradeProfile"},
+        "upgrade_settings": {"key": "properties.upgradeSettings", "type": "ClusterUpgradeSettings"},
+        "auto_scaler_profile": {
+            "key": "properties.autoScalerProfile",
+            "type": "ManagedClusterPropertiesAutoScalerProfile",
+        },
+        "api_server_access_profile": {
+            "key": "properties.apiServerAccessProfile",
+            "type": "ManagedClusterAPIServerAccessProfile",
+        },
+        "disk_encryption_set_id": {"key": "properties.diskEncryptionSetID", "type": "str"},
+        "identity_profile": {"key": "properties.identityProfile", "type": "{UserAssignedIdentity}"},
+        "private_link_resources": {"key": "properties.privateLinkResources", "type": "[PrivateLinkResource]"},
+        "disable_local_accounts": {"key": "properties.disableLocalAccounts", "type": "bool"},
+        "http_proxy_config": {"key": "properties.httpProxyConfig", "type": "ManagedClusterHTTPProxyConfig"},
+        "security_profile": {"key": "properties.securityProfile", "type": "ManagedClusterSecurityProfile"},
+        "storage_profile": {"key": "properties.storageProfile", "type": "ManagedClusterStorageProfile"},
+        "ingress_profile": {"key": "properties.ingressProfile", "type": "ManagedClusterIngressProfile"},
+        "public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"},
+        "workload_auto_scaler_profile": {
+            "key": "properties.workloadAutoScalerProfile",
+            "type": "ManagedClusterWorkloadAutoScalerProfile",
+        },
+        "azure_monitor_profile": {"key": "properties.azureMonitorProfile", "type": "ManagedClusterAzureMonitorProfile"},
+        "service_mesh_profile": {"key": "properties.serviceMeshProfile", "type": "ServiceMeshProfile"},
+        "resource_uid": {"key": "properties.resourceUID", "type": "str"},
+        "metrics_profile": {"key": "properties.metricsProfile", "type": "ManagedClusterMetricsProfile"},
+        "node_provisioning_profile": {
+            "key": "properties.nodeProvisioningProfile",
+            "type": "ManagedClusterNodeProvisioningProfile",
+        },
+        "bootstrap_profile": {"key": "properties.bootstrapProfile", "type": "ManagedClusterBootstrapProfile"},
+        "ai_toolchain_operator_profile": {
+            "key": "properties.aiToolchainOperatorProfile",
+            "type": "ManagedClusterAIToolchainOperatorProfile",
+        },
+        "status": {"key": "properties.status", "type": "ManagedClusterStatus"},
+    }
+
+    def __init__(  # pylint: disable=too-many-locals
+        self,
+        *,
+        location: str,
+        tags: Optional[dict[str, str]] = None,
+        sku: Optional["_models.ManagedClusterSKU"] = None,
+        extended_location: Optional["_models.ExtendedLocation"] = None,
+        identity: Optional["_models.ManagedClusterIdentity"] = None,
+        kind: Optional[str] = None,
+        kubernetes_version: Optional[str] = None,
+        dns_prefix: Optional[str] = None,
+        fqdn_subdomain: Optional[str] = None,
+        agent_pool_profiles: Optional[list["_models.ManagedClusterAgentPoolProfile"]] = None,
+        linux_profile: Optional["_models.ContainerServiceLinuxProfile"] = None,
+        windows_profile: Optional["_models.ManagedClusterWindowsProfile"] = None,
+        service_principal_profile: Optional["_models.ManagedClusterServicePrincipalProfile"] = None,
+        addon_profiles: Optional[dict[str, "_models.ManagedClusterAddonProfile"]] = None,
+        pod_identity_profile: Optional["_models.ManagedClusterPodIdentityProfile"] = None,
+        oidc_issuer_profile: Optional["_models.ManagedClusterOIDCIssuerProfile"] = None,
+        node_resource_group: Optional[str] = None,
+        node_resource_group_profile: Optional["_models.ManagedClusterNodeResourceGroupProfile"] = None,
+        enable_rbac: Optional[bool] = None,
+        support_plan: Optional[Union[str, "_models.KubernetesSupportPlan"]] = None,
+        network_profile: Optional["_models.ContainerServiceNetworkProfile"] = None,
+        aad_profile: Optional["_models.ManagedClusterAADProfile"] = None,
+        auto_upgrade_profile: Optional["_models.ManagedClusterAutoUpgradeProfile"] = None,
+        upgrade_settings: Optional["_models.ClusterUpgradeSettings"] = None,
+        auto_scaler_profile: Optional["_models.ManagedClusterPropertiesAutoScalerProfile"] = None,
+        api_server_access_profile: Optional["_models.ManagedClusterAPIServerAccessProfile"] = None,
+        disk_encryption_set_id: Optional[str] = None,
+        identity_profile: Optional[dict[str, "_models.UserAssignedIdentity"]] = None,
+        private_link_resources: Optional[list["_models.PrivateLinkResource"]] = None,
+        disable_local_accounts: Optional[bool] = None,
+        http_proxy_config: Optional["_models.ManagedClusterHTTPProxyConfig"] = None,
+        security_profile: Optional["_models.ManagedClusterSecurityProfile"] = None,
+        storage_profile: Optional["_models.ManagedClusterStorageProfile"] = None,
+        ingress_profile: Optional["_models.ManagedClusterIngressProfile"] = None,
+        public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None,
+        workload_auto_scaler_profile: Optional["_models.ManagedClusterWorkloadAutoScalerProfile"] = None,
+        azure_monitor_profile: Optional["_models.ManagedClusterAzureMonitorProfile"] = None,
+        service_mesh_profile: Optional["_models.ServiceMeshProfile"] = None,
+        metrics_profile: Optional["_models.ManagedClusterMetricsProfile"] = None,
+        node_provisioning_profile: Optional["_models.ManagedClusterNodeProvisioningProfile"] = None,
+        bootstrap_profile: Optional["_models.ManagedClusterBootstrapProfile"] = None,
+        ai_toolchain_operator_profile: Optional["_models.ManagedClusterAIToolchainOperatorProfile"] = None,
+        status: Optional["_models.ManagedClusterStatus"] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword tags: Resource tags.
+        :paramtype tags: dict[str, str]
+        :keyword location: The geo-location where the resource lives. Required.
+        :paramtype location: str
+        :keyword sku: The managed cluster SKU.
+        :paramtype sku: ~azure.mgmt.containerservice.models.ManagedClusterSKU
+        :keyword extended_location: The extended location of the Virtual Machine.
+        :paramtype extended_location: ~azure.mgmt.containerservice.models.ExtendedLocation
+        :keyword identity: The identity of the managed cluster, if configured.
+        :paramtype identity: ~azure.mgmt.containerservice.models.ManagedClusterIdentity
+        :keyword kind: This is primarily used to expose different UI experiences in the portal for
+         different kinds.
+        :paramtype kind: str
+        :keyword kubernetes_version: The version of Kubernetes specified by the user. Both patch
+         version (e.g. 1.20.13) and minor version (e.g. 1.20) are supported. When a minor version
+         is specified, the latest supported GA patch version is chosen automatically.
+         Updating the cluster with the same minor version once it has been created (e.g. 1.14.x -> 1.14)
+         will not trigger an upgrade, even if a newer patch version is available. When you upgrade a
+         supported AKS cluster, Kubernetes minor versions cannot be skipped. All upgrades must be
+         performed sequentially by major version number. For example, upgrades between 1.14.x -> 1.15.x
+         or 1.15.x -> 1.16.x are allowed, however 1.14.x -> 1.16.x is not allowed. See `upgrading an AKS
+         cluster `_ for more details.
+        :paramtype kubernetes_version: str
+        :keyword dns_prefix: The DNS prefix of the Managed Cluster. This cannot be updated once the
+         Managed Cluster has been created.
+        :paramtype dns_prefix: str
+        :keyword fqdn_subdomain: The FQDN subdomain of the private cluster with custom private dns
+         zone. This cannot be updated once the Managed Cluster has been created.
+        :paramtype fqdn_subdomain: str
+        :keyword agent_pool_profiles: The agent pool properties.
+        :paramtype agent_pool_profiles:
+         list[~azure.mgmt.containerservice.models.ManagedClusterAgentPoolProfile]
+        :keyword linux_profile: The profile for Linux VMs in the Managed Cluster.
+        :paramtype linux_profile: ~azure.mgmt.containerservice.models.ContainerServiceLinuxProfile
+        :keyword windows_profile: The profile for Windows VMs in the Managed Cluster.
+        :paramtype windows_profile: ~azure.mgmt.containerservice.models.ManagedClusterWindowsProfile
+        :keyword service_principal_profile: Information about a service principal identity for the
+         cluster to use for manipulating Azure APIs.
+        :paramtype service_principal_profile:
+         ~azure.mgmt.containerservice.models.ManagedClusterServicePrincipalProfile
+        :keyword addon_profiles: The profile of managed cluster add-on.
+        :paramtype addon_profiles: dict[str,
+         ~azure.mgmt.containerservice.models.ManagedClusterAddonProfile]
+        :keyword pod_identity_profile: The pod identity profile of the Managed Cluster. See `use AAD
+         pod identity `_ for more
+         details on AAD pod identity integration.
+        :paramtype pod_identity_profile:
+         ~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProfile
+        :keyword oidc_issuer_profile: The OIDC issuer profile of the Managed Cluster.
+        :paramtype oidc_issuer_profile:
+         ~azure.mgmt.containerservice.models.ManagedClusterOIDCIssuerProfile
+        :keyword node_resource_group: The name of the resource group containing agent pool nodes.
+        :paramtype node_resource_group: str
+        :keyword node_resource_group_profile: Profile of the node resource group configuration.
+        :paramtype node_resource_group_profile:
+         ~azure.mgmt.containerservice.models.ManagedClusterNodeResourceGroupProfile
+        :keyword enable_rbac: Whether to enable Kubernetes Role-Based Access Control.
+        :paramtype enable_rbac: bool
+        :keyword support_plan: The support plan for the Managed Cluster. If unspecified, the default is
+         'KubernetesOfficial'. Known values are: "KubernetesOfficial" and "AKSLongTermSupport".
+        :paramtype support_plan: str or ~azure.mgmt.containerservice.models.KubernetesSupportPlan
+        :keyword network_profile: The network configuration profile.
+        :paramtype network_profile: ~azure.mgmt.containerservice.models.ContainerServiceNetworkProfile
+        :keyword aad_profile: The Azure Active Directory configuration.
+        :paramtype aad_profile: ~azure.mgmt.containerservice.models.ManagedClusterAADProfile
+        :keyword auto_upgrade_profile: The auto upgrade configuration.
+        :paramtype auto_upgrade_profile:
+         ~azure.mgmt.containerservice.models.ManagedClusterAutoUpgradeProfile
+        :keyword upgrade_settings: Settings for upgrading a cluster.
+        :paramtype upgrade_settings: ~azure.mgmt.containerservice.models.ClusterUpgradeSettings
+        :keyword auto_scaler_profile: Parameters to be applied to the cluster-autoscaler when enabled.
+        :paramtype auto_scaler_profile:
+         ~azure.mgmt.containerservice.models.ManagedClusterPropertiesAutoScalerProfile
+        :keyword api_server_access_profile: The access profile for managed cluster API server.
+        :paramtype api_server_access_profile:
+         ~azure.mgmt.containerservice.models.ManagedClusterAPIServerAccessProfile
+        :keyword disk_encryption_set_id: The Resource ID of the disk encryption set to use for enabling
+         encryption at rest. This is of the form:
+         '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{encryptionSetName}'.
+        :paramtype disk_encryption_set_id: str
+        :keyword identity_profile: The user identity associated with the managed cluster. This identity
+         will be used by the kubelet. Only one user assigned identity is allowed. The only accepted key
+         is "kubeletidentity", with value of "resourceId":
+         "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}".
+        :paramtype identity_profile: dict[str,
+         ~azure.mgmt.containerservice.models.UserAssignedIdentity]
+        :keyword private_link_resources: Private link resources associated with the cluster.
+        :paramtype private_link_resources:
+         list[~azure.mgmt.containerservice.models.PrivateLinkResource]
+        :keyword disable_local_accounts: If local accounts should be disabled on the Managed Cluster.
+         If set to true, getting static credentials will be disabled for this cluster. This must only be
+         used on Managed Clusters that are AAD enabled. For more details see `disable local accounts
+         `_.
+        :paramtype disable_local_accounts: bool
+        :keyword http_proxy_config: Configurations for provisioning the cluster with HTTP proxy
+         servers.
+        :paramtype http_proxy_config: ~azure.mgmt.containerservice.models.ManagedClusterHTTPProxyConfig
+        :keyword security_profile: Security profile for the managed cluster.
+        :paramtype security_profile: ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfile
+        :keyword storage_profile: Storage profile for the managed cluster.
+        :paramtype storage_profile: ~azure.mgmt.containerservice.models.ManagedClusterStorageProfile
+        :keyword ingress_profile: Ingress profile for the managed cluster.
+        :paramtype ingress_profile: ~azure.mgmt.containerservice.models.ManagedClusterIngressProfile
+        :keyword public_network_access: PublicNetworkAccess of the managedCluster. Allow or deny public
+         network access for AKS. Known values are: "Enabled" and "Disabled".
+        :paramtype public_network_access: str or
+         ~azure.mgmt.containerservice.models.PublicNetworkAccess
+        :keyword workload_auto_scaler_profile: Workload Auto-scaler profile for the managed cluster.
+        :paramtype workload_auto_scaler_profile:
+         ~azure.mgmt.containerservice.models.ManagedClusterWorkloadAutoScalerProfile
+        :keyword azure_monitor_profile: Azure Monitor addon profiles for monitoring the managed
+         cluster.
+        :paramtype azure_monitor_profile:
+         ~azure.mgmt.containerservice.models.ManagedClusterAzureMonitorProfile
+        :keyword service_mesh_profile: Service mesh profile for a managed cluster.
+        :paramtype service_mesh_profile: ~azure.mgmt.containerservice.models.ServiceMeshProfile
+        :keyword metrics_profile: Optional cluster metrics configuration.
+        :paramtype metrics_profile: ~azure.mgmt.containerservice.models.ManagedClusterMetricsProfile
+        :keyword node_provisioning_profile: Node provisioning settings that apply to the whole cluster.
+        :paramtype node_provisioning_profile:
+         ~azure.mgmt.containerservice.models.ManagedClusterNodeProvisioningProfile
+        :keyword bootstrap_profile: Profile of the cluster bootstrap configuration.
+        :paramtype bootstrap_profile:
+         ~azure.mgmt.containerservice.models.ManagedClusterBootstrapProfile
+        :keyword ai_toolchain_operator_profile: AI toolchain operator settings that apply to the whole
+         cluster.
+        :paramtype ai_toolchain_operator_profile:
+         ~azure.mgmt.containerservice.models.ManagedClusterAIToolchainOperatorProfile
+        :keyword status: Contains read-only information about the Managed Cluster.
+        :paramtype status: ~azure.mgmt.containerservice.models.ManagedClusterStatus
+        """
+        # Base class handles the common tracked-resource fields (tags/location).
+        super().__init__(tags=tags, location=location, **kwargs)
+        # Attributes initialized to None with an explicit Optional annotation are
+        # read-only (see _validation): the service populates them on responses and
+        # they are never sent on requests.
+        self.e_tag: Optional[str] = None
+        self.sku = sku
+        self.extended_location = extended_location
+        self.identity = identity
+        self.kind = kind
+        self.provisioning_state: Optional[str] = None
+        self.power_state: Optional["_models.PowerState"] = None
+        self.max_agent_pools: Optional[int] = None
+        self.kubernetes_version = kubernetes_version
+        self.current_kubernetes_version: Optional[str] = None
+        self.dns_prefix = dns_prefix
+        self.fqdn_subdomain = fqdn_subdomain
+        self.fqdn: Optional[str] = None
+        self.private_fqdn: Optional[str] = None
+        self.azure_portal_fqdn: Optional[str] = None
+        self.agent_pool_profiles = agent_pool_profiles
+        self.linux_profile = linux_profile
+        self.windows_profile = windows_profile
+        self.service_principal_profile = service_principal_profile
+        self.addon_profiles = addon_profiles
+        self.pod_identity_profile = pod_identity_profile
+        self.oidc_issuer_profile = oidc_issuer_profile
+        self.node_resource_group = node_resource_group
+        self.node_resource_group_profile = node_resource_group_profile
+        self.enable_rbac = enable_rbac
+        self.support_plan = support_plan
+        self.network_profile = network_profile
+        self.aad_profile = aad_profile
+        self.auto_upgrade_profile = auto_upgrade_profile
+        self.upgrade_settings = upgrade_settings
+        self.auto_scaler_profile = auto_scaler_profile
+        self.api_server_access_profile = api_server_access_profile
+        self.disk_encryption_set_id = disk_encryption_set_id
+        self.identity_profile = identity_profile
+        self.private_link_resources = private_link_resources
+        self.disable_local_accounts = disable_local_accounts
+        self.http_proxy_config = http_proxy_config
+        self.security_profile = security_profile
+        self.storage_profile = storage_profile
+        self.ingress_profile = ingress_profile
+        self.public_network_access = public_network_access
+        self.workload_auto_scaler_profile = workload_auto_scaler_profile
+        self.azure_monitor_profile = azure_monitor_profile
+        self.service_mesh_profile = service_mesh_profile
+        self.resource_uid: Optional[str] = None
+        self.metrics_profile = metrics_profile
+        self.node_provisioning_profile = node_provisioning_profile
+        self.bootstrap_profile = bootstrap_profile
+        self.ai_toolchain_operator_profile = ai_toolchain_operator_profile
+        self.status = status
+
+
+class ManagedClusterAADProfile(_serialization.Model):
+ """AADProfile specifies attributes for Azure Active Directory integration. For more details see
+ `managed AAD on AKS `_.
+
+ :ivar managed: Whether to enable managed AAD.
+ :vartype managed: bool
+ :ivar enable_azure_rbac: Whether to enable Azure RBAC for Kubernetes authorization.
+ :vartype enable_azure_rbac: bool
+ :ivar admin_group_object_i_ds: The list of AAD group object IDs that will have admin role of
+ the cluster.
+ :vartype admin_group_object_i_ds: list[str]
+ :ivar client_app_id: (DEPRECATED) The client AAD application ID. Learn more at
+ https://aka.ms/aks/aad-legacy.
+ :vartype client_app_id: str
+ :ivar server_app_id: (DEPRECATED) The server AAD application ID. Learn more at
+ https://aka.ms/aks/aad-legacy.
+ :vartype server_app_id: str
+ :ivar server_app_secret: (DEPRECATED) The server AAD application secret. Learn more at
+ https://aka.ms/aks/aad-legacy.
+ :vartype server_app_secret: str
+ :ivar tenant_id: The AAD tenant ID to use for authentication. If not specified, will use the
+ tenant of the deployment subscription.
+ :vartype tenant_id: str
+ """
+
+ _attribute_map = {
+ "managed": {"key": "managed", "type": "bool"},
+ "enable_azure_rbac": {"key": "enableAzureRBAC", "type": "bool"},
+ "admin_group_object_i_ds": {"key": "adminGroupObjectIDs", "type": "[str]"},
+ "client_app_id": {"key": "clientAppID", "type": "str"},
+ "server_app_id": {"key": "serverAppID", "type": "str"},
+ "server_app_secret": {"key": "serverAppSecret", "type": "str"},
+ "tenant_id": {"key": "tenantID", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ managed: Optional[bool] = None,
+ enable_azure_rbac: Optional[bool] = None,
+ admin_group_object_i_ds: Optional[list[str]] = None,
+ client_app_id: Optional[str] = None,
+ server_app_id: Optional[str] = None,
+ server_app_secret: Optional[str] = None,
+ tenant_id: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword managed: Whether to enable managed AAD.
+ :paramtype managed: bool
+ :keyword enable_azure_rbac: Whether to enable Azure RBAC for Kubernetes authorization.
+ :paramtype enable_azure_rbac: bool
+ :keyword admin_group_object_i_ds: The list of AAD group object IDs that will have admin role of
+ the cluster.
+ :paramtype admin_group_object_i_ds: list[str]
+ :keyword client_app_id: (DEPRECATED) The client AAD application ID. Learn more at
+ https://aka.ms/aks/aad-legacy.
+ :paramtype client_app_id: str
+ :keyword server_app_id: (DEPRECATED) The server AAD application ID. Learn more at
+ https://aka.ms/aks/aad-legacy.
+ :paramtype server_app_id: str
+ :keyword server_app_secret: (DEPRECATED) The server AAD application secret. Learn more at
+ https://aka.ms/aks/aad-legacy.
+ :paramtype server_app_secret: str
+ :keyword tenant_id: The AAD tenant ID to use for authentication. If not specified, will use the
+ tenant of the deployment subscription.
+ :paramtype tenant_id: str
+ """
+ super().__init__(**kwargs)
+ self.managed = managed
+ self.enable_azure_rbac = enable_azure_rbac
+ self.admin_group_object_i_ds = admin_group_object_i_ds
+ self.client_app_id = client_app_id
+ self.server_app_id = server_app_id
+ self.server_app_secret = server_app_secret
+ self.tenant_id = tenant_id
+
+
+class ManagedClusterAccessProfile(TrackedResource):
+ """Managed cluster Access Profile.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar id: Fully qualified resource ID for the resource. E.g.
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}".
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.containerservice.models.SystemData
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
+ :ivar location: The geo-location where the resource lives. Required.
+ :vartype location: str
+ :ivar kube_config: Base64-encoded Kubernetes configuration file.
+ :vartype kube_config: bytes
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "location": {"required": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "location": {"key": "location", "type": "str"},
+ "kube_config": {"key": "properties.kubeConfig", "type": "bytearray"},
+ }
+
+ def __init__(
+ self,
+ *,
+ location: str,
+ tags: Optional[dict[str, str]] = None,
+ kube_config: Optional[bytes] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ :keyword location: The geo-location where the resource lives. Required.
+ :paramtype location: str
+ :keyword kube_config: Base64-encoded Kubernetes configuration file.
+ :paramtype kube_config: bytes
+ """
+ super().__init__(tags=tags, location=location, **kwargs)
+ self.kube_config = kube_config
+
+
+class ManagedClusterAddonProfile(_serialization.Model):
+ """A Kubernetes add-on profile for a managed cluster.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar enabled: Whether the add-on is enabled or not. Required.
+ :vartype enabled: bool
+ :ivar config: Key-value pairs for configuring an add-on.
+ :vartype config: dict[str, str]
+ :ivar identity: Information of user assigned identity used by this add-on.
+ :vartype identity: ~azure.mgmt.containerservice.models.ManagedClusterAddonProfileIdentity
+ """
+
+ _validation = {
+ "enabled": {"required": True},
+ "identity": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "enabled": {"key": "enabled", "type": "bool"},
+ "config": {"key": "config", "type": "{str}"},
+ "identity": {"key": "identity", "type": "ManagedClusterAddonProfileIdentity"},
+ }
+
+ def __init__(self, *, enabled: bool, config: Optional[dict[str, str]] = None, **kwargs: Any) -> None:
+ """
+ :keyword enabled: Whether the add-on is enabled or not. Required.
+ :paramtype enabled: bool
+ :keyword config: Key-value pairs for configuring an add-on.
+ :paramtype config: dict[str, str]
+ """
+ super().__init__(**kwargs)
+ self.enabled = enabled
+ self.config = config
+ self.identity: Optional["_models.ManagedClusterAddonProfileIdentity"] = None
+
+
+class UserAssignedIdentity(_serialization.Model):
+ """Details about a user assigned identity.
+
+ :ivar resource_id: The resource ID of the user assigned identity.
+ :vartype resource_id: str
+ :ivar client_id: The client ID of the user assigned identity.
+ :vartype client_id: str
+ :ivar object_id: The object ID of the user assigned identity.
+ :vartype object_id: str
+ """
+
+ _attribute_map = {
+ "resource_id": {"key": "resourceId", "type": "str"},
+ "client_id": {"key": "clientId", "type": "str"},
+ "object_id": {"key": "objectId", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ resource_id: Optional[str] = None,
+ client_id: Optional[str] = None,
+ object_id: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword resource_id: The resource ID of the user assigned identity.
+ :paramtype resource_id: str
+ :keyword client_id: The client ID of the user assigned identity.
+ :paramtype client_id: str
+ :keyword object_id: The object ID of the user assigned identity.
+ :paramtype object_id: str
+ """
+ super().__init__(**kwargs)
+ self.resource_id = resource_id
+ self.client_id = client_id
+ self.object_id = object_id
+
+
+class ManagedClusterAddonProfileIdentity(UserAssignedIdentity):
+ """Information of user assigned identity used by this add-on.
+
+ :ivar resource_id: The resource ID of the user assigned identity.
+ :vartype resource_id: str
+ :ivar client_id: The client ID of the user assigned identity.
+ :vartype client_id: str
+ :ivar object_id: The object ID of the user assigned identity.
+ :vartype object_id: str
+ """
+
+
+class ManagedClusterAgentPoolProfileProperties(_serialization.Model):
+ """Properties for the container service agent pool profile.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar e_tag: Unique read-only string used to implement optimistic concurrency. The eTag value
+ will change when the resource is updated. Specify an if-match or if-none-match header with the
+ eTag value for a subsequent request to enable optimistic concurrency per the normal eTag
+ convention.
+ :vartype e_tag: str
+ :ivar count: Number of agents (VMs) to host docker containers. Allowed values must be in the
+ range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for
+ system pools. The default value is 1.
+ :vartype count: int
+ :ivar vm_size: The size of the agent pool VMs. VM size availability varies by region. If a node
+ contains insufficient compute resources (memory, cpu, etc) pods might fail to run correctly.
+ For more details on restricted VM sizes, see:
+ https://docs.microsoft.com/azure/aks/quotas-skus-regions.
+ :vartype vm_size: str
+ :ivar os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine
+ in the master/agent pool. If you specify 0, it will apply the default osDisk size according to
+ the vmSize specified.
+ :vartype os_disk_size_gb: int
+ :ivar os_disk_type: The OS disk type to be used for machines in the agent pool. The default is
+ 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB.
+ Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see
+ `Ephemeral OS `_.
+ Known values are: "Managed" and "Ephemeral".
+ :vartype os_disk_type: str or ~azure.mgmt.containerservice.models.OSDiskType
+ :ivar kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime data
+ root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary".
+ :vartype kubelet_disk_type: str or ~azure.mgmt.containerservice.models.KubeletDiskType
+ :ivar workload_runtime: Determines the type of workload a node can run. Known values are:
+ "OCIContainer", "WasmWasi", and "KataVmIsolation".
+ :vartype workload_runtime: str or ~azure.mgmt.containerservice.models.WorkloadRuntime
+ :ivar message_of_the_day: Message of the day for Linux nodes, base64-encoded. A base64-encoded
+ string which will be written to /etc/motd after decoding. This allows customization of the
+ message of the day for Linux nodes. It must not be specified for Windows nodes. It must be a
+ static string (i.e., will be printed raw and not be executed as a script).
+ :vartype message_of_the_day: str
+ :ivar vnet_subnet_id: The ID of the subnet which agent pool nodes and optionally pods will join
+ on startup. If this is not specified, a VNET and subnet will be generated and used. If no
+ podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes.
+ This is of the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}.
+ :vartype vnet_subnet_id: str
+ :ivar pod_subnet_id: The ID of the subnet which pods will join when launched. If omitted, pod
+ IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of
+ the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}.
+ :vartype pod_subnet_id: str
+ :ivar pod_ip_allocation_mode: Pod IP Allocation Mode. The IP allocation mode for pods in the
+ agent pool. Must be used with podSubnetId. The default is 'DynamicIndividual'. Known values
+ are: "DynamicIndividual" and "StaticBlock".
+ :vartype pod_ip_allocation_mode: str or ~azure.mgmt.containerservice.models.PodIPAllocationMode
+ :ivar max_pods: The maximum number of pods that can run on a node.
+ :vartype max_pods: int
+ :ivar os_type: The operating system type. The default is Linux. Known values are: "Linux" and
+ "Windows".
+ :vartype os_type: str or ~azure.mgmt.containerservice.models.OSType
+ :ivar os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is
+ Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >=
+ 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "AzureLinux3",
+ "CBLMariner", "Windows2019", "Windows2022", "Ubuntu2204", and "Ubuntu2404".
+ :vartype os_sku: str or ~azure.mgmt.containerservice.models.OSSKU
+ :ivar max_count: The maximum number of nodes for auto-scaling.
+ :vartype max_count: int
+ :ivar min_count: The minimum number of nodes for auto-scaling.
+ :vartype min_count: int
+ :ivar enable_auto_scaling: Whether to enable auto-scaler.
+ :vartype enable_auto_scaling: bool
+ :ivar scale_down_mode: The scale down mode to use when scaling the Agent Pool. This also
+ effects the cluster autoscaler behavior. If not specified, it defaults to Delete. Known values
+ are: "Delete" and "Deallocate".
+ :vartype scale_down_mode: str or ~azure.mgmt.containerservice.models.ScaleDownMode
+ :ivar type: The type of Agent Pool. Known values are: "VirtualMachineScaleSets",
+ "AvailabilitySet", and "VirtualMachines".
+ :vartype type: str or ~azure.mgmt.containerservice.models.AgentPoolType
+ :ivar mode: The mode of an agent pool. A cluster must have at least one 'System' Agent Pool at
+ all times. For additional information on agent pool restrictions and best practices, see:
+ https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System", "User", and
+ "Gateway".
+ :vartype mode: str or ~azure.mgmt.containerservice.models.AgentPoolMode
+ :ivar orchestrator_version: The version of Kubernetes specified by the user. Both patch version
+ (e.g. 1.20.13) and major.minor version (e.g. 1.20) are supported. When a major.minor version
+ is specified, the latest supported GA patch version is chosen automatically.
+ Updating the cluster with the same major.minor version once it has been created (e.g. 1.14.x -> 1.14)
+ will not trigger an upgrade, even if a newer patch version is available. As a best practice,
+ you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node
+ pool version must have the same major version as the control plane. The node pool minor version
+ must be within two minor versions of the control plane version. The node pool version cannot be
+ greater than the control plane version. For more information see `upgrading a node pool
+ <https://docs.microsoft.com/azure/aks/use-multiple-node-pools#upgrade-a-node-pool>`_.
+ :vartype orchestrator_version: str
+ :ivar current_orchestrator_version: The version of Kubernetes the Agent Pool is running. If
+ orchestratorVersion is a fully specified patch version (e.g. 1.20.13), this field will be
+ exactly equal to it. If orchestratorVersion is a major.minor version (e.g. 1.20), this field
+ will contain the full version being used.
+ :vartype current_orchestrator_version: str
+ :ivar node_image_version: The version of node image.
+ :vartype node_image_version: str
+ :ivar upgrade_settings: Settings for upgrading the agentpool.
+ :vartype upgrade_settings: ~azure.mgmt.containerservice.models.AgentPoolUpgradeSettings
+ :ivar provisioning_state: The current deployment or provisioning state.
+ :vartype provisioning_state: str
+ :ivar power_state: Whether the Agent Pool is running or stopped. When an Agent Pool is first
+ created it is initially Running. The Agent Pool can be stopped by setting this field to
+ Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An
+ Agent Pool can only be stopped if it is Running and provisioning state is Succeeded.
+ :vartype power_state: ~azure.mgmt.containerservice.models.PowerState
+ :ivar availability_zones: The list of Availability zones to use for nodes. This can only be
+ specified if the AgentPoolType property is 'VirtualMachineScaleSets'.
+ :vartype availability_zones: list[str]
+ :ivar enable_node_public_ip: Whether each node is allocated its own public IP. Some scenarios
+ may require nodes in a node pool to receive their own dedicated public IP addresses. A common
+ scenario is for gaming workloads, where a console needs to make a direct connection to a cloud
+ virtual machine to minimize hops. For more information see `assigning a public IP per node
+ `_.
+ The default is false.
+ :vartype enable_node_public_ip: bool
+ :ivar node_public_ip_prefix_id: The public IP prefix ID which VM nodes should use IPs from.
+ This is of the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}.
+ :vartype node_public_ip_prefix_id: str
+ :ivar scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the default
+ is 'Regular'. Known values are: "Spot" and "Regular".
+ :vartype scale_set_priority: str or ~azure.mgmt.containerservice.models.ScaleSetPriority
+ :ivar scale_set_eviction_policy: The Virtual Machine Scale Set eviction policy to use. This
+ cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is
+ 'Delete'. Known values are: "Delete" and "Deallocate".
+ :vartype scale_set_eviction_policy: str or
+ ~azure.mgmt.containerservice.models.ScaleSetEvictionPolicy
+ :ivar spot_max_price: The max price (in US Dollars) you are willing to pay for spot instances.
+ Possible values are any decimal value greater than zero or -1 which indicates default price to
+ be up-to on-demand. Possible values are any decimal value greater than zero or -1 which
+ indicates the willingness to pay any on-demand price. For more details on spot pricing, see
+ `spot VMs pricing <https://azure.microsoft.com/pricing/spot/>`_.
+ :vartype spot_max_price: float
+ :ivar tags: The tags to be persisted on the agent pool virtual machine scale set.
+ :vartype tags: dict[str, str]
+ :ivar node_labels: The node labels to be persisted across all nodes in agent pool.
+ :vartype node_labels: dict[str, str]
+ :ivar node_taints: The taints added to new nodes during node pool create and scale. For
+ example, key=value:NoSchedule.
+ :vartype node_taints: list[str]
+ :ivar proximity_placement_group_id: The ID for Proximity Placement Group.
+ :vartype proximity_placement_group_id: str
+ :ivar kubelet_config: The Kubelet configuration on the agent pool nodes.
+ :vartype kubelet_config: ~azure.mgmt.containerservice.models.KubeletConfig
+ :ivar linux_os_config: The OS configuration of Linux agent nodes.
+ :vartype linux_os_config: ~azure.mgmt.containerservice.models.LinuxOSConfig
+ :ivar enable_encryption_at_host: Whether to enable host based OS and data drive encryption.
+ This is only supported on certain VM sizes and in certain Azure regions. For more information,
+ see: https://docs.microsoft.com/azure/aks/enable-host-encryption.
+ :vartype enable_encryption_at_host: bool
+ :ivar enable_ultra_ssd: Whether to enable UltraSSD.
+ :vartype enable_ultra_ssd: bool
+ :ivar enable_fips: Whether to use a FIPS-enabled OS. See `Add a FIPS-enabled node pool
+ `_
+ for more details.
+ :vartype enable_fips: bool
+ :ivar gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance profile
+ for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and "MIG7g".
+ :vartype gpu_instance_profile: str or ~azure.mgmt.containerservice.models.GPUInstanceProfile
+ :ivar creation_data: CreationData to be used to specify the source Snapshot ID if the node pool
+ will be created/upgraded using a snapshot.
+ :vartype creation_data: ~azure.mgmt.containerservice.models.CreationData
+ :ivar capacity_reservation_group_id: AKS will associate the specified agent pool with the
+ Capacity Reservation Group.
+ :vartype capacity_reservation_group_id: str
+ :ivar host_group_id: The fully qualified resource ID of the Dedicated Host Group to provision
+ virtual machines from, used only in creation scenario and not allowed to changed once set. This
+ is of the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}.
+ For more information see `Azure dedicated hosts
+ `_.
+ :vartype host_group_id: str
+ :ivar network_profile: Network-related settings of an agent pool.
+ :vartype network_profile: ~azure.mgmt.containerservice.models.AgentPoolNetworkProfile
+ :ivar windows_profile: The Windows agent pool's specific profile.
+ :vartype windows_profile: ~azure.mgmt.containerservice.models.AgentPoolWindowsProfile
+ :ivar security_profile: The security settings of an agent pool.
+ :vartype security_profile: ~azure.mgmt.containerservice.models.AgentPoolSecurityProfile
+ :ivar gpu_profile: GPU settings for the Agent Pool.
+ :vartype gpu_profile: ~azure.mgmt.containerservice.models.GPUProfile
+ :ivar gateway_profile: Profile specific to a managed agent pool in Gateway mode. This field
+ cannot be set if agent pool mode is not Gateway.
+ :vartype gateway_profile: ~azure.mgmt.containerservice.models.AgentPoolGatewayProfile
+ :ivar virtual_machines_profile: Specifications on VirtualMachines agent pool.
+ :vartype virtual_machines_profile: ~azure.mgmt.containerservice.models.VirtualMachinesProfile
+ :ivar virtual_machine_nodes_status: The status of nodes in a VirtualMachines agent pool.
+ :vartype virtual_machine_nodes_status:
+ list[~azure.mgmt.containerservice.models.VirtualMachineNodes]
+ :ivar status: Contains read-only information about the Agent Pool.
+ :vartype status: ~azure.mgmt.containerservice.models.AgentPoolStatus
+ :ivar local_dns_profile: Configures the per-node local DNS, with VnetDNS and KubeDNS overrides.
+ LocalDNS helps improve performance and reliability of DNS resolution in an AKS cluster. For
+ more details see aka.ms/aks/localdns.
+ :vartype local_dns_profile: ~azure.mgmt.containerservice.models.LocalDNSProfile
+ """
+
+    # msrest validation map: properties marked "readonly" are populated by the
+    # service and rejected on input; osDiskSizeGB must be within 0-2048
+    # (0 means "use the default disk size for the chosen vmSize").
+    _validation = {
+        "e_tag": {"readonly": True},
+        "os_disk_size_gb": {"maximum": 2048, "minimum": 0},
+        "current_orchestrator_version": {"readonly": True},
+        "node_image_version": {"readonly": True},
+        "provisioning_state": {"readonly": True},
+    }
+
+ _attribute_map = {
+ "e_tag": {"key": "eTag", "type": "str"},
+ "count": {"key": "count", "type": "int"},
+ "vm_size": {"key": "vmSize", "type": "str"},
+ "os_disk_size_gb": {"key": "osDiskSizeGB", "type": "int"},
+ "os_disk_type": {"key": "osDiskType", "type": "str"},
+ "kubelet_disk_type": {"key": "kubeletDiskType", "type": "str"},
+ "workload_runtime": {"key": "workloadRuntime", "type": "str"},
+ "message_of_the_day": {"key": "messageOfTheDay", "type": "str"},
+ "vnet_subnet_id": {"key": "vnetSubnetID", "type": "str"},
+ "pod_subnet_id": {"key": "podSubnetID", "type": "str"},
+ "pod_ip_allocation_mode": {"key": "podIPAllocationMode", "type": "str"},
+ "max_pods": {"key": "maxPods", "type": "int"},
+ "os_type": {"key": "osType", "type": "str"},
+ "os_sku": {"key": "osSKU", "type": "str"},
+ "max_count": {"key": "maxCount", "type": "int"},
+ "min_count": {"key": "minCount", "type": "int"},
+ "enable_auto_scaling": {"key": "enableAutoScaling", "type": "bool"},
+ "scale_down_mode": {"key": "scaleDownMode", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "mode": {"key": "mode", "type": "str"},
+ "orchestrator_version": {"key": "orchestratorVersion", "type": "str"},
+ "current_orchestrator_version": {"key": "currentOrchestratorVersion", "type": "str"},
+ "node_image_version": {"key": "nodeImageVersion", "type": "str"},
+ "upgrade_settings": {"key": "upgradeSettings", "type": "AgentPoolUpgradeSettings"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "power_state": {"key": "powerState", "type": "PowerState"},
+ "availability_zones": {"key": "availabilityZones", "type": "[str]"},
+ "enable_node_public_ip": {"key": "enableNodePublicIP", "type": "bool"},
+ "node_public_ip_prefix_id": {"key": "nodePublicIPPrefixID", "type": "str"},
+ "scale_set_priority": {"key": "scaleSetPriority", "type": "str"},
+ "scale_set_eviction_policy": {"key": "scaleSetEvictionPolicy", "type": "str"},
+ "spot_max_price": {"key": "spotMaxPrice", "type": "float"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "node_labels": {"key": "nodeLabels", "type": "{str}"},
+ "node_taints": {"key": "nodeTaints", "type": "[str]"},
+ "proximity_placement_group_id": {"key": "proximityPlacementGroupID", "type": "str"},
+ "kubelet_config": {"key": "kubeletConfig", "type": "KubeletConfig"},
+ "linux_os_config": {"key": "linuxOSConfig", "type": "LinuxOSConfig"},
+ "enable_encryption_at_host": {"key": "enableEncryptionAtHost", "type": "bool"},
+ "enable_ultra_ssd": {"key": "enableUltraSSD", "type": "bool"},
+ "enable_fips": {"key": "enableFIPS", "type": "bool"},
+ "gpu_instance_profile": {"key": "gpuInstanceProfile", "type": "str"},
+ "creation_data": {"key": "creationData", "type": "CreationData"},
+ "capacity_reservation_group_id": {"key": "capacityReservationGroupID", "type": "str"},
+ "host_group_id": {"key": "hostGroupID", "type": "str"},
+ "network_profile": {"key": "networkProfile", "type": "AgentPoolNetworkProfile"},
+ "windows_profile": {"key": "windowsProfile", "type": "AgentPoolWindowsProfile"},
+ "security_profile": {"key": "securityProfile", "type": "AgentPoolSecurityProfile"},
+ "gpu_profile": {"key": "gpuProfile", "type": "GPUProfile"},
+ "gateway_profile": {"key": "gatewayProfile", "type": "AgentPoolGatewayProfile"},
+ "virtual_machines_profile": {"key": "virtualMachinesProfile", "type": "VirtualMachinesProfile"},
+ "virtual_machine_nodes_status": {"key": "virtualMachineNodesStatus", "type": "[VirtualMachineNodes]"},
+ "status": {"key": "status", "type": "AgentPoolStatus"},
+ "local_dns_profile": {"key": "localDNSProfile", "type": "LocalDNSProfile"},
+ }
+
+    def __init__(  # pylint: disable=too-many-locals
+        self,
+        *,
+        count: Optional[int] = None,
+        vm_size: Optional[str] = None,
+        os_disk_size_gb: Optional[int] = None,
+        os_disk_type: Optional[Union[str, "_models.OSDiskType"]] = None,
+        kubelet_disk_type: Optional[Union[str, "_models.KubeletDiskType"]] = None,
+        workload_runtime: Optional[Union[str, "_models.WorkloadRuntime"]] = None,
+        message_of_the_day: Optional[str] = None,
+        vnet_subnet_id: Optional[str] = None,
+        pod_subnet_id: Optional[str] = None,
+        pod_ip_allocation_mode: Optional[Union[str, "_models.PodIPAllocationMode"]] = None,
+        max_pods: Optional[int] = None,
+        os_type: Union[str, "_models.OSType"] = "Linux",
+        os_sku: Optional[Union[str, "_models.OSSKU"]] = None,
+        max_count: Optional[int] = None,
+        min_count: Optional[int] = None,
+        enable_auto_scaling: Optional[bool] = None,
+        scale_down_mode: Optional[Union[str, "_models.ScaleDownMode"]] = None,
+        type: Optional[Union[str, "_models.AgentPoolType"]] = None,
+        mode: Optional[Union[str, "_models.AgentPoolMode"]] = None,
+        orchestrator_version: Optional[str] = None,
+        upgrade_settings: Optional["_models.AgentPoolUpgradeSettings"] = None,
+        power_state: Optional["_models.PowerState"] = None,
+        availability_zones: Optional[list[str]] = None,
+        enable_node_public_ip: Optional[bool] = None,
+        node_public_ip_prefix_id: Optional[str] = None,
+        scale_set_priority: Union[str, "_models.ScaleSetPriority"] = "Regular",
+        scale_set_eviction_policy: Union[str, "_models.ScaleSetEvictionPolicy"] = "Delete",
+        spot_max_price: float = -1,
+        tags: Optional[dict[str, str]] = None,
+        node_labels: Optional[dict[str, str]] = None,
+        node_taints: Optional[list[str]] = None,
+        proximity_placement_group_id: Optional[str] = None,
+        kubelet_config: Optional["_models.KubeletConfig"] = None,
+        linux_os_config: Optional["_models.LinuxOSConfig"] = None,
+        enable_encryption_at_host: Optional[bool] = None,
+        enable_ultra_ssd: Optional[bool] = None,
+        enable_fips: Optional[bool] = None,
+        gpu_instance_profile: Optional[Union[str, "_models.GPUInstanceProfile"]] = None,
+        creation_data: Optional["_models.CreationData"] = None,
+        capacity_reservation_group_id: Optional[str] = None,
+        host_group_id: Optional[str] = None,
+        network_profile: Optional["_models.AgentPoolNetworkProfile"] = None,
+        windows_profile: Optional["_models.AgentPoolWindowsProfile"] = None,
+        security_profile: Optional["_models.AgentPoolSecurityProfile"] = None,
+        gpu_profile: Optional["_models.GPUProfile"] = None,
+        gateway_profile: Optional["_models.AgentPoolGatewayProfile"] = None,
+        virtual_machines_profile: Optional["_models.VirtualMachinesProfile"] = None,
+        virtual_machine_nodes_status: Optional[list["_models.VirtualMachineNodes"]] = None,
+        status: Optional["_models.AgentPoolStatus"] = None,
+        local_dns_profile: Optional["_models.LocalDNSProfile"] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword count: Number of agents (VMs) to host docker containers. Allowed values must be in the
+         range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for
+         system pools. The default value is 1.
+        :paramtype count: int
+        :keyword vm_size: The size of the agent pool VMs. VM size availability varies by region. If a
+         node contains insufficient compute resources (memory, cpu, etc) pods might fail to run
+         correctly. For more details on restricted VM sizes, see:
+         https://docs.microsoft.com/azure/aks/quotas-skus-regions.
+        :paramtype vm_size: str
+        :keyword os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every
+         machine in the master/agent pool. If you specify 0, it will apply the default osDisk size
+         according to the vmSize specified.
+        :paramtype os_disk_size_gb: int
+        :keyword os_disk_type: The OS disk type to be used for machines in the agent pool. The default
+         is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested
+         OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more
+         information see `Ephemeral OS
+         <https://docs.microsoft.com/azure/aks/cluster-configuration#ephemeral-os>`_. Known values are:
+         "Managed" and "Ephemeral".
+        :paramtype os_disk_type: str or ~azure.mgmt.containerservice.models.OSDiskType
+        :keyword kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime
+         data root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary".
+        :paramtype kubelet_disk_type: str or ~azure.mgmt.containerservice.models.KubeletDiskType
+        :keyword workload_runtime: Determines the type of workload a node can run. Known values are:
+         "OCIContainer", "WasmWasi", and "KataVmIsolation".
+        :paramtype workload_runtime: str or ~azure.mgmt.containerservice.models.WorkloadRuntime
+        :keyword message_of_the_day: Message of the day for Linux nodes, base64-encoded. A
+         base64-encoded string which will be written to /etc/motd after decoding. This allows
+         customization of the message of the day for Linux nodes. It must not be specified for Windows
+         nodes. It must be a static string (i.e., will be printed raw and not be executed as a script).
+        :paramtype message_of_the_day: str
+        :keyword vnet_subnet_id: The ID of the subnet which agent pool nodes and optionally pods will
+         join on startup. If this is not specified, a VNET and subnet will be generated and used. If no
+         podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes.
+         This is of the form:
+         /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}.
+        :paramtype vnet_subnet_id: str
+        :keyword pod_subnet_id: The ID of the subnet which pods will join when launched. If omitted,
+         pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is
+         of the form:
+         /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}.
+        :paramtype pod_subnet_id: str
+        :keyword pod_ip_allocation_mode: Pod IP Allocation Mode. The IP allocation mode for pods in the
+         agent pool. Must be used with podSubnetId. The default is 'DynamicIndividual'. Known values
+         are: "DynamicIndividual" and "StaticBlock".
+        :paramtype pod_ip_allocation_mode: str or
+         ~azure.mgmt.containerservice.models.PodIPAllocationMode
+        :keyword max_pods: The maximum number of pods that can run on a node.
+        :paramtype max_pods: int
+        :keyword os_type: The operating system type. The default is Linux. Known values are: "Linux"
+         and "Windows".
+        :paramtype os_type: str or ~azure.mgmt.containerservice.models.OSType
+        :keyword os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType
+         is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >=
+         1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "AzureLinux3",
+         "CBLMariner", "Windows2019", "Windows2022", "Ubuntu2204", and "Ubuntu2404".
+        :paramtype os_sku: str or ~azure.mgmt.containerservice.models.OSSKU
+        :keyword max_count: The maximum number of nodes for auto-scaling.
+        :paramtype max_count: int
+        :keyword min_count: The minimum number of nodes for auto-scaling.
+        :paramtype min_count: int
+        :keyword enable_auto_scaling: Whether to enable auto-scaler.
+        :paramtype enable_auto_scaling: bool
+        :keyword scale_down_mode: The scale down mode to use when scaling the Agent Pool. This also
+         effects the cluster autoscaler behavior. If not specified, it defaults to Delete. Known values
+         are: "Delete" and "Deallocate".
+        :paramtype scale_down_mode: str or ~azure.mgmt.containerservice.models.ScaleDownMode
+        :keyword type: The type of Agent Pool. Known values are: "VirtualMachineScaleSets",
+         "AvailabilitySet", and "VirtualMachines".
+        :paramtype type: str or ~azure.mgmt.containerservice.models.AgentPoolType
+        :keyword mode: The mode of an agent pool. A cluster must have at least one 'System' Agent Pool
+         at all times. For additional information on agent pool restrictions and best practices, see:
+         https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System", "User", and
+         "Gateway".
+        :paramtype mode: str or ~azure.mgmt.containerservice.models.AgentPoolMode
+        :keyword orchestrator_version: The version of Kubernetes specified by the user. Both patch
+         version (e.g. 1.20.13) and major.minor version (e.g. 1.20) are supported. When a major.minor
+         version is specified, the latest supported GA patch version is chosen automatically.
+         Updating the cluster with the same major.minor version once it has been created (e.g. 1.14.x -> 1.14)
+         will not trigger an upgrade, even if a newer patch version is available. As a best practice,
+         you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node
+         pool version must have the same major version as the control plane. The node pool minor version
+         must be within two minor versions of the control plane version. The node pool version cannot be
+         greater than the control plane version. For more information see `upgrading a node pool
+         <https://docs.microsoft.com/azure/aks/use-multiple-node-pools#upgrade-a-node-pool>`_.
+        :paramtype orchestrator_version: str
+        :keyword upgrade_settings: Settings for upgrading the agentpool.
+        :paramtype upgrade_settings: ~azure.mgmt.containerservice.models.AgentPoolUpgradeSettings
+        :keyword power_state: Whether the Agent Pool is running or stopped. When an Agent Pool is first
+         created it is initially Running. The Agent Pool can be stopped by setting this field to
+         Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An
+         Agent Pool can only be stopped if it is Running and provisioning state is Succeeded.
+        :paramtype power_state: ~azure.mgmt.containerservice.models.PowerState
+        :keyword availability_zones: The list of Availability zones to use for nodes. This can only be
+         specified if the AgentPoolType property is 'VirtualMachineScaleSets'.
+        :paramtype availability_zones: list[str]
+        :keyword enable_node_public_ip: Whether each node is allocated its own public IP. Some
+         scenarios may require nodes in a node pool to receive their own dedicated public IP addresses.
+         A common scenario is for gaming workloads, where a console needs to make a direct connection to
+         a cloud virtual machine to minimize hops. For more information see `assigning a public IP per
+         node
+         <https://docs.microsoft.com/azure/aks/use-multiple-node-pools#assign-a-public-ip-per-node-for-your-node-pools>`_.
+         The default is false.
+        :paramtype enable_node_public_ip: bool
+        :keyword node_public_ip_prefix_id: The public IP prefix ID which VM nodes should use IPs from.
+         This is of the form:
+         /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}.
+        :paramtype node_public_ip_prefix_id: str
+        :keyword scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the
+         default is 'Regular'. Known values are: "Spot" and "Regular".
+        :paramtype scale_set_priority: str or ~azure.mgmt.containerservice.models.ScaleSetPriority
+        :keyword scale_set_eviction_policy: The Virtual Machine Scale Set eviction policy to use. This
+         cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is
+         'Delete'. Known values are: "Delete" and "Deallocate".
+        :paramtype scale_set_eviction_policy: str or
+         ~azure.mgmt.containerservice.models.ScaleSetEvictionPolicy
+        :keyword spot_max_price: The max price (in US Dollars) you are willing to pay for spot
+         instances. Possible values are any decimal value greater than zero or -1 which indicates
+         default price to be up-to on-demand. Possible values are any decimal value greater than zero or
+         -1 which indicates the willingness to pay any on-demand price. For more details on spot
+         pricing, see `spot VMs pricing
+         <https://azure.microsoft.com/pricing/spot/>`_.
+        :paramtype spot_max_price: float
+        :keyword tags: The tags to be persisted on the agent pool virtual machine scale set.
+        :paramtype tags: dict[str, str]
+        :keyword node_labels: The node labels to be persisted across all nodes in agent pool.
+        :paramtype node_labels: dict[str, str]
+        :keyword node_taints: The taints added to new nodes during node pool create and scale. For
+         example, key=value:NoSchedule.
+        :paramtype node_taints: list[str]
+        :keyword proximity_placement_group_id: The ID for Proximity Placement Group.
+        :paramtype proximity_placement_group_id: str
+        :keyword kubelet_config: The Kubelet configuration on the agent pool nodes.
+        :paramtype kubelet_config: ~azure.mgmt.containerservice.models.KubeletConfig
+        :keyword linux_os_config: The OS configuration of Linux agent nodes.
+        :paramtype linux_os_config: ~azure.mgmt.containerservice.models.LinuxOSConfig
+        :keyword enable_encryption_at_host: Whether to enable host based OS and data drive encryption.
+         This is only supported on certain VM sizes and in certain Azure regions. For more information,
+         see: https://docs.microsoft.com/azure/aks/enable-host-encryption.
+        :paramtype enable_encryption_at_host: bool
+        :keyword enable_ultra_ssd: Whether to enable UltraSSD.
+        :paramtype enable_ultra_ssd: bool
+        :keyword enable_fips: Whether to use a FIPS-enabled OS. See `Add a FIPS-enabled node pool
+         <https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview>`_
+         for more details.
+        :paramtype enable_fips: bool
+        :keyword gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance
+         profile for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and
+         "MIG7g".
+        :paramtype gpu_instance_profile: str or ~azure.mgmt.containerservice.models.GPUInstanceProfile
+        :keyword creation_data: CreationData to be used to specify the source Snapshot ID if the node
+         pool will be created/upgraded using a snapshot.
+        :paramtype creation_data: ~azure.mgmt.containerservice.models.CreationData
+        :keyword capacity_reservation_group_id: AKS will associate the specified agent pool with the
+         Capacity Reservation Group.
+        :paramtype capacity_reservation_group_id: str
+        :keyword host_group_id: The fully qualified resource ID of the Dedicated Host Group to
+         provision virtual machines from, used only in creation scenario and not allowed to changed once
+         set. This is of the form:
+         /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}.
+         For more information see `Azure dedicated hosts
+         <https://docs.microsoft.com/azure/virtual-machines/dedicated-hosts>`_.
+        :paramtype host_group_id: str
+        :keyword network_profile: Network-related settings of an agent pool.
+        :paramtype network_profile: ~azure.mgmt.containerservice.models.AgentPoolNetworkProfile
+        :keyword windows_profile: The Windows agent pool's specific profile.
+        :paramtype windows_profile: ~azure.mgmt.containerservice.models.AgentPoolWindowsProfile
+        :keyword security_profile: The security settings of an agent pool.
+        :paramtype security_profile: ~azure.mgmt.containerservice.models.AgentPoolSecurityProfile
+        :keyword gpu_profile: GPU settings for the Agent Pool.
+        :paramtype gpu_profile: ~azure.mgmt.containerservice.models.GPUProfile
+        :keyword gateway_profile: Profile specific to a managed agent pool in Gateway mode. This field
+         cannot be set if agent pool mode is not Gateway.
+        :paramtype gateway_profile: ~azure.mgmt.containerservice.models.AgentPoolGatewayProfile
+        :keyword virtual_machines_profile: Specifications on VirtualMachines agent pool.
+        :paramtype virtual_machines_profile: ~azure.mgmt.containerservice.models.VirtualMachinesProfile
+        :keyword virtual_machine_nodes_status: The status of nodes in a VirtualMachines agent pool.
+        :paramtype virtual_machine_nodes_status:
+         list[~azure.mgmt.containerservice.models.VirtualMachineNodes]
+        :keyword status: Contains read-only information about the Agent Pool.
+        :paramtype status: ~azure.mgmt.containerservice.models.AgentPoolStatus
+        :keyword local_dns_profile: Configures the per-node local DNS, with VnetDNS and KubeDNS
+         overrides. LocalDNS helps improve performance and reliability of DNS resolution in an AKS
+         cluster. For more details see aka.ms/aks/localdns.
+        :paramtype local_dns_profile: ~azure.mgmt.containerservice.models.LocalDNSProfile
+        """
+        super().__init__(**kwargs)
+        # Read-only, server-populated property (see _validation): initialized
+        # to None here and filled in by the deserializer.
+        self.e_tag: Optional[str] = None
+        self.count = count
+        self.vm_size = vm_size
+        self.os_disk_size_gb = os_disk_size_gb
+        self.os_disk_type = os_disk_type
+        self.kubelet_disk_type = kubelet_disk_type
+        self.workload_runtime = workload_runtime
+        self.message_of_the_day = message_of_the_day
+        self.vnet_subnet_id = vnet_subnet_id
+        self.pod_subnet_id = pod_subnet_id
+        self.pod_ip_allocation_mode = pod_ip_allocation_mode
+        self.max_pods = max_pods
+        self.os_type = os_type
+        self.os_sku = os_sku
+        self.max_count = max_count
+        self.min_count = min_count
+        self.enable_auto_scaling = enable_auto_scaling
+        self.scale_down_mode = scale_down_mode
+        self.type = type
+        self.mode = mode
+        self.orchestrator_version = orchestrator_version
+        # Read-only, server-populated properties (see _validation).
+        self.current_orchestrator_version: Optional[str] = None
+        self.node_image_version: Optional[str] = None
+        self.upgrade_settings = upgrade_settings
+        self.provisioning_state: Optional[str] = None  # read-only, server-populated
+        self.power_state = power_state
+        self.availability_zones = availability_zones
+        self.enable_node_public_ip = enable_node_public_ip
+        self.node_public_ip_prefix_id = node_public_ip_prefix_id
+        self.scale_set_priority = scale_set_priority
+        self.scale_set_eviction_policy = scale_set_eviction_policy
+        self.spot_max_price = spot_max_price
+        self.tags = tags
+        self.node_labels = node_labels
+        self.node_taints = node_taints
+        self.proximity_placement_group_id = proximity_placement_group_id
+        self.kubelet_config = kubelet_config
+        self.linux_os_config = linux_os_config
+        self.enable_encryption_at_host = enable_encryption_at_host
+        self.enable_ultra_ssd = enable_ultra_ssd
+        self.enable_fips = enable_fips
+        self.gpu_instance_profile = gpu_instance_profile
+        self.creation_data = creation_data
+        self.capacity_reservation_group_id = capacity_reservation_group_id
+        self.host_group_id = host_group_id
+        self.network_profile = network_profile
+        self.windows_profile = windows_profile
+        self.security_profile = security_profile
+        self.gpu_profile = gpu_profile
+        self.gateway_profile = gateway_profile
+        self.virtual_machines_profile = virtual_machines_profile
+        self.virtual_machine_nodes_status = virtual_machine_nodes_status
+        self.status = status
+        self.local_dns_profile = local_dns_profile
+
+
+class ManagedClusterAgentPoolProfile(ManagedClusterAgentPoolProfileProperties):
+ """Profile for the container service agent pool.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar e_tag: Unique read-only string used to implement optimistic concurrency. The eTag value
+ will change when the resource is updated. Specify an if-match or if-none-match header with the
+ eTag value for a subsequent request to enable optimistic concurrency per the normal eTag
+ convention.
+ :vartype e_tag: str
+ :ivar count: Number of agents (VMs) to host docker containers. Allowed values must be in the
+ range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for
+ system pools. The default value is 1.
+ :vartype count: int
+ :ivar vm_size: The size of the agent pool VMs. VM size availability varies by region. If a node
+ contains insufficient compute resources (memory, cpu, etc) pods might fail to run correctly.
+ For more details on restricted VM sizes, see:
+ https://docs.microsoft.com/azure/aks/quotas-skus-regions.
+ :vartype vm_size: str
+ :ivar os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine
+ in the master/agent pool. If you specify 0, it will apply the default osDisk size according to
+ the vmSize specified.
+ :vartype os_disk_size_gb: int
+ :ivar os_disk_type: The OS disk type to be used for machines in the agent pool. The default is
+ 'Ephemeral' if the VM supports it and has a cache disk larger than the requested OSDiskSizeGB.
+ Otherwise, defaults to 'Managed'. May not be changed after creation. For more information see
+ `Ephemeral OS <https://docs.microsoft.com/azure/aks/cluster-configuration#ephemeral-os>`_.
+ Known values are: "Managed" and "Ephemeral".
+ :vartype os_disk_type: str or ~azure.mgmt.containerservice.models.OSDiskType
+ :ivar kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime data
+ root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary".
+ :vartype kubelet_disk_type: str or ~azure.mgmt.containerservice.models.KubeletDiskType
+ :ivar workload_runtime: Determines the type of workload a node can run. Known values are:
+ "OCIContainer", "WasmWasi", and "KataVmIsolation".
+ :vartype workload_runtime: str or ~azure.mgmt.containerservice.models.WorkloadRuntime
+ :ivar message_of_the_day: Message of the day for Linux nodes, base64-encoded. A base64-encoded
+ string which will be written to /etc/motd after decoding. This allows customization of the
+ message of the day for Linux nodes. It must not be specified for Windows nodes. It must be a
+ static string (i.e., will be printed raw and not be executed as a script).
+ :vartype message_of_the_day: str
+ :ivar vnet_subnet_id: The ID of the subnet which agent pool nodes and optionally pods will join
+ on startup. If this is not specified, a VNET and subnet will be generated and used. If no
+ podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes.
+ This is of the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}.
+ :vartype vnet_subnet_id: str
+ :ivar pod_subnet_id: The ID of the subnet which pods will join when launched. If omitted, pod
+ IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is of
+ the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}.
+ :vartype pod_subnet_id: str
+ :ivar pod_ip_allocation_mode: Pod IP Allocation Mode. The IP allocation mode for pods in the
+ agent pool. Must be used with podSubnetId. The default is 'DynamicIndividual'. Known values
+ are: "DynamicIndividual" and "StaticBlock".
+ :vartype pod_ip_allocation_mode: str or ~azure.mgmt.containerservice.models.PodIPAllocationMode
+ :ivar max_pods: The maximum number of pods that can run on a node.
+ :vartype max_pods: int
+ :ivar os_type: The operating system type. The default is Linux. Known values are: "Linux" and
+ "Windows".
+ :vartype os_type: str or ~azure.mgmt.containerservice.models.OSType
+ :ivar os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is
+ Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >=
+ 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "AzureLinux3",
+ "CBLMariner", "Windows2019", "Windows2022", "Ubuntu2204", and "Ubuntu2404".
+ :vartype os_sku: str or ~azure.mgmt.containerservice.models.OSSKU
+ :ivar max_count: The maximum number of nodes for auto-scaling.
+ :vartype max_count: int
+ :ivar min_count: The minimum number of nodes for auto-scaling.
+ :vartype min_count: int
+ :ivar enable_auto_scaling: Whether to enable auto-scaler.
+ :vartype enable_auto_scaling: bool
+ :ivar scale_down_mode: The scale down mode to use when scaling the Agent Pool. This also
+ effects the cluster autoscaler behavior. If not specified, it defaults to Delete. Known values
+ are: "Delete" and "Deallocate".
+ :vartype scale_down_mode: str or ~azure.mgmt.containerservice.models.ScaleDownMode
+ :ivar type: The type of Agent Pool. Known values are: "VirtualMachineScaleSets",
+ "AvailabilitySet", and "VirtualMachines".
+ :vartype type: str or ~azure.mgmt.containerservice.models.AgentPoolType
+ :ivar mode: The mode of an agent pool. A cluster must have at least one 'System' Agent Pool at
+ all times. For additional information on agent pool restrictions and best practices, see:
+ https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System", "User", and
+ "Gateway".
+ :vartype mode: str or ~azure.mgmt.containerservice.models.AgentPoolMode
+ :ivar orchestrator_version: The version of Kubernetes specified by the user. Both patch version
+ <major.minor.patch> (e.g. 1.20.13) and <major.minor> (e.g. 1.20) are supported. When
+ <major.minor> is specified, the latest supported GA patch version is chosen automatically.
+ Updating the cluster with the same <major.minor> once it has been created (e.g. 1.14.x -> 1.14)
+ will not trigger an upgrade, even if a newer patch version is available. As a best practice,
+ you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node
+ pool version must have the same major version as the control plane. The node pool minor version
+ must be within two minor versions of the control plane version. The node pool version cannot be
+ greater than the control plane version. For more information see `upgrading a node pool
+ <https://docs.microsoft.com/azure/aks/use-multiple-node-pools#upgrade-a-node-pool>`_.
+ :vartype orchestrator_version: str
+ :ivar current_orchestrator_version: The version of Kubernetes the Agent Pool is running. If
+ orchestratorVersion is a fully specified version <major.minor.patch>, this field will be
+ exactly equal to it. If orchestratorVersion is <major.minor>, this field will contain the full
+ <major.minor.patch> version being used.
+ :vartype current_orchestrator_version: str
+ :ivar node_image_version: The version of node image.
+ :vartype node_image_version: str
+ :ivar upgrade_settings: Settings for upgrading the agentpool.
+ :vartype upgrade_settings: ~azure.mgmt.containerservice.models.AgentPoolUpgradeSettings
+ :ivar provisioning_state: The current deployment or provisioning state.
+ :vartype provisioning_state: str
+ :ivar power_state: Whether the Agent Pool is running or stopped. When an Agent Pool is first
+ created it is initially Running. The Agent Pool can be stopped by setting this field to
+ Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An
+ Agent Pool can only be stopped if it is Running and provisioning state is Succeeded.
+ :vartype power_state: ~azure.mgmt.containerservice.models.PowerState
+ :ivar availability_zones: The list of Availability zones to use for nodes. This can only be
+ specified if the AgentPoolType property is 'VirtualMachineScaleSets'.
+ :vartype availability_zones: list[str]
+ :ivar enable_node_public_ip: Whether each node is allocated its own public IP. Some scenarios
+ may require nodes in a node pool to receive their own dedicated public IP addresses. A common
+ scenario is for gaming workloads, where a console needs to make a direct connection to a cloud
+ virtual machine to minimize hops. For more information see `assigning a public IP per node
+ <https://docs.microsoft.com/azure/aks/use-multiple-node-pools#assign-a-public-ip-per-node-for-your-node-pools>`_.
+ The default is false.
+ :vartype enable_node_public_ip: bool
+ :ivar node_public_ip_prefix_id: The public IP prefix ID which VM nodes should use IPs from.
+ This is of the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}.
+ :vartype node_public_ip_prefix_id: str
+ :ivar scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the default
+ is 'Regular'. Known values are: "Spot" and "Regular".
+ :vartype scale_set_priority: str or ~azure.mgmt.containerservice.models.ScaleSetPriority
+ :ivar scale_set_eviction_policy: The Virtual Machine Scale Set eviction policy to use. This
+ cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is
+ 'Delete'. Known values are: "Delete" and "Deallocate".
+ :vartype scale_set_eviction_policy: str or
+ ~azure.mgmt.containerservice.models.ScaleSetEvictionPolicy
+ :ivar spot_max_price: The max price (in US Dollars) you are willing to pay for spot instances.
+ Possible values are any decimal value greater than zero or -1 which indicates default price to
+ be up-to on-demand. Possible values are any decimal value greater than zero or -1 which
+ indicates the willingness to pay any on-demand price. For more details on spot pricing, see
+ `spot VMs pricing <https://azure.microsoft.com/pricing/spot/>`_.
+ :vartype spot_max_price: float
+ :ivar tags: The tags to be persisted on the agent pool virtual machine scale set.
+ :vartype tags: dict[str, str]
+ :ivar node_labels: The node labels to be persisted across all nodes in agent pool.
+ :vartype node_labels: dict[str, str]
+ :ivar node_taints: The taints added to new nodes during node pool create and scale. For
+ example, key=value:NoSchedule.
+ :vartype node_taints: list[str]
+ :ivar proximity_placement_group_id: The ID for Proximity Placement Group.
+ :vartype proximity_placement_group_id: str
+ :ivar kubelet_config: The Kubelet configuration on the agent pool nodes.
+ :vartype kubelet_config: ~azure.mgmt.containerservice.models.KubeletConfig
+ :ivar linux_os_config: The OS configuration of Linux agent nodes.
+ :vartype linux_os_config: ~azure.mgmt.containerservice.models.LinuxOSConfig
+ :ivar enable_encryption_at_host: Whether to enable host based OS and data drive encryption.
+ This is only supported on certain VM sizes and in certain Azure regions. For more information,
+ see: https://docs.microsoft.com/azure/aks/enable-host-encryption.
+ :vartype enable_encryption_at_host: bool
+ :ivar enable_ultra_ssd: Whether to enable UltraSSD.
+ :vartype enable_ultra_ssd: bool
+ :ivar enable_fips: Whether to use a FIPS-enabled OS. See `Add a FIPS-enabled node pool
+ <https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview>`_
+ for more details.
+ :vartype enable_fips: bool
+ :ivar gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance profile
+ for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and "MIG7g".
+ :vartype gpu_instance_profile: str or ~azure.mgmt.containerservice.models.GPUInstanceProfile
+ :ivar creation_data: CreationData to be used to specify the source Snapshot ID if the node pool
+ will be created/upgraded using a snapshot.
+ :vartype creation_data: ~azure.mgmt.containerservice.models.CreationData
+ :ivar capacity_reservation_group_id: AKS will associate the specified agent pool with the
+ Capacity Reservation Group.
+ :vartype capacity_reservation_group_id: str
+ :ivar host_group_id: The fully qualified resource ID of the Dedicated Host Group to provision
+ virtual machines from, used only in creation scenario and not allowed to changed once set. This
+ is of the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}.
+ For more information see `Azure dedicated hosts
+ <https://docs.microsoft.com/azure/virtual-machines/dedicated-hosts>`_.
+ :vartype host_group_id: str
+ :ivar network_profile: Network-related settings of an agent pool.
+ :vartype network_profile: ~azure.mgmt.containerservice.models.AgentPoolNetworkProfile
+ :ivar windows_profile: The Windows agent pool's specific profile.
+ :vartype windows_profile: ~azure.mgmt.containerservice.models.AgentPoolWindowsProfile
+ :ivar security_profile: The security settings of an agent pool.
+ :vartype security_profile: ~azure.mgmt.containerservice.models.AgentPoolSecurityProfile
+ :ivar gpu_profile: GPU settings for the Agent Pool.
+ :vartype gpu_profile: ~azure.mgmt.containerservice.models.GPUProfile
+ :ivar gateway_profile: Profile specific to a managed agent pool in Gateway mode. This field
+ cannot be set if agent pool mode is not Gateway.
+ :vartype gateway_profile: ~azure.mgmt.containerservice.models.AgentPoolGatewayProfile
+ :ivar virtual_machines_profile: Specifications on VirtualMachines agent pool.
+ :vartype virtual_machines_profile: ~azure.mgmt.containerservice.models.VirtualMachinesProfile
+ :ivar virtual_machine_nodes_status: The status of nodes in a VirtualMachines agent pool.
+ :vartype virtual_machine_nodes_status:
+ list[~azure.mgmt.containerservice.models.VirtualMachineNodes]
+ :ivar status: Contains read-only information about the Agent Pool.
+ :vartype status: ~azure.mgmt.containerservice.models.AgentPoolStatus
+ :ivar local_dns_profile: Configures the per-node local DNS, with VnetDNS and KubeDNS overrides.
+ LocalDNS helps improve performance and reliability of DNS resolution in an AKS cluster. For
+ more details see aka.ms/aks/localdns.
+ :vartype local_dns_profile: ~azure.mgmt.containerservice.models.LocalDNSProfile
+ :ivar name: Unique name of the agent pool profile in the context of the subscription and
+ resource group. Windows agent pool names must be 6 characters or less. Required.
+ :vartype name: str
+ """
+
+ _validation = {
+ "e_tag": {"readonly": True},
+ "os_disk_size_gb": {"maximum": 2048, "minimum": 0},
+ "current_orchestrator_version": {"readonly": True},
+ "node_image_version": {"readonly": True},
+ "provisioning_state": {"readonly": True},
+ "name": {"required": True, "pattern": r"^[a-z][a-z0-9]{0,11}$"},
+ }
+
+ _attribute_map = {
+ "e_tag": {"key": "eTag", "type": "str"},
+ "count": {"key": "count", "type": "int"},
+ "vm_size": {"key": "vmSize", "type": "str"},
+ "os_disk_size_gb": {"key": "osDiskSizeGB", "type": "int"},
+ "os_disk_type": {"key": "osDiskType", "type": "str"},
+ "kubelet_disk_type": {"key": "kubeletDiskType", "type": "str"},
+ "workload_runtime": {"key": "workloadRuntime", "type": "str"},
+ "message_of_the_day": {"key": "messageOfTheDay", "type": "str"},
+ "vnet_subnet_id": {"key": "vnetSubnetID", "type": "str"},
+ "pod_subnet_id": {"key": "podSubnetID", "type": "str"},
+ "pod_ip_allocation_mode": {"key": "podIPAllocationMode", "type": "str"},
+ "max_pods": {"key": "maxPods", "type": "int"},
+ "os_type": {"key": "osType", "type": "str"},
+ "os_sku": {"key": "osSKU", "type": "str"},
+ "max_count": {"key": "maxCount", "type": "int"},
+ "min_count": {"key": "minCount", "type": "int"},
+ "enable_auto_scaling": {"key": "enableAutoScaling", "type": "bool"},
+ "scale_down_mode": {"key": "scaleDownMode", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "mode": {"key": "mode", "type": "str"},
+ "orchestrator_version": {"key": "orchestratorVersion", "type": "str"},
+ "current_orchestrator_version": {"key": "currentOrchestratorVersion", "type": "str"},
+ "node_image_version": {"key": "nodeImageVersion", "type": "str"},
+ "upgrade_settings": {"key": "upgradeSettings", "type": "AgentPoolUpgradeSettings"},
+ "provisioning_state": {"key": "provisioningState", "type": "str"},
+ "power_state": {"key": "powerState", "type": "PowerState"},
+ "availability_zones": {"key": "availabilityZones", "type": "[str]"},
+ "enable_node_public_ip": {"key": "enableNodePublicIP", "type": "bool"},
+ "node_public_ip_prefix_id": {"key": "nodePublicIPPrefixID", "type": "str"},
+ "scale_set_priority": {"key": "scaleSetPriority", "type": "str"},
+ "scale_set_eviction_policy": {"key": "scaleSetEvictionPolicy", "type": "str"},
+ "spot_max_price": {"key": "spotMaxPrice", "type": "float"},
+ "tags": {"key": "tags", "type": "{str}"},
+ "node_labels": {"key": "nodeLabels", "type": "{str}"},
+ "node_taints": {"key": "nodeTaints", "type": "[str]"},
+ "proximity_placement_group_id": {"key": "proximityPlacementGroupID", "type": "str"},
+ "kubelet_config": {"key": "kubeletConfig", "type": "KubeletConfig"},
+ "linux_os_config": {"key": "linuxOSConfig", "type": "LinuxOSConfig"},
+ "enable_encryption_at_host": {"key": "enableEncryptionAtHost", "type": "bool"},
+ "enable_ultra_ssd": {"key": "enableUltraSSD", "type": "bool"},
+ "enable_fips": {"key": "enableFIPS", "type": "bool"},
+ "gpu_instance_profile": {"key": "gpuInstanceProfile", "type": "str"},
+ "creation_data": {"key": "creationData", "type": "CreationData"},
+ "capacity_reservation_group_id": {"key": "capacityReservationGroupID", "type": "str"},
+ "host_group_id": {"key": "hostGroupID", "type": "str"},
+ "network_profile": {"key": "networkProfile", "type": "AgentPoolNetworkProfile"},
+ "windows_profile": {"key": "windowsProfile", "type": "AgentPoolWindowsProfile"},
+ "security_profile": {"key": "securityProfile", "type": "AgentPoolSecurityProfile"},
+ "gpu_profile": {"key": "gpuProfile", "type": "GPUProfile"},
+ "gateway_profile": {"key": "gatewayProfile", "type": "AgentPoolGatewayProfile"},
+ "virtual_machines_profile": {"key": "virtualMachinesProfile", "type": "VirtualMachinesProfile"},
+ "virtual_machine_nodes_status": {"key": "virtualMachineNodesStatus", "type": "[VirtualMachineNodes]"},
+ "status": {"key": "status", "type": "AgentPoolStatus"},
+ "local_dns_profile": {"key": "localDNSProfile", "type": "LocalDNSProfile"},
+ "name": {"key": "name", "type": "str"},
+ }
+
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ name: str,
+ count: Optional[int] = None,
+ vm_size: Optional[str] = None,
+ os_disk_size_gb: Optional[int] = None,
+ os_disk_type: Optional[Union[str, "_models.OSDiskType"]] = None,
+ kubelet_disk_type: Optional[Union[str, "_models.KubeletDiskType"]] = None,
+ workload_runtime: Optional[Union[str, "_models.WorkloadRuntime"]] = None,
+ message_of_the_day: Optional[str] = None,
+ vnet_subnet_id: Optional[str] = None,
+ pod_subnet_id: Optional[str] = None,
+ pod_ip_allocation_mode: Optional[Union[str, "_models.PodIPAllocationMode"]] = None,
+ max_pods: Optional[int] = None,
+ os_type: Union[str, "_models.OSType"] = "Linux",
+ os_sku: Optional[Union[str, "_models.OSSKU"]] = None,
+ max_count: Optional[int] = None,
+ min_count: Optional[int] = None,
+ enable_auto_scaling: Optional[bool] = None,
+ scale_down_mode: Optional[Union[str, "_models.ScaleDownMode"]] = None,
+ type: Optional[Union[str, "_models.AgentPoolType"]] = None,
+ mode: Optional[Union[str, "_models.AgentPoolMode"]] = None,
+ orchestrator_version: Optional[str] = None,
+ upgrade_settings: Optional["_models.AgentPoolUpgradeSettings"] = None,
+ power_state: Optional["_models.PowerState"] = None,
+ availability_zones: Optional[list[str]] = None,
+ enable_node_public_ip: Optional[bool] = None,
+ node_public_ip_prefix_id: Optional[str] = None,
+ scale_set_priority: Union[str, "_models.ScaleSetPriority"] = "Regular",
+ scale_set_eviction_policy: Union[str, "_models.ScaleSetEvictionPolicy"] = "Delete",
+ spot_max_price: float = -1,
+ tags: Optional[dict[str, str]] = None,
+ node_labels: Optional[dict[str, str]] = None,
+ node_taints: Optional[list[str]] = None,
+ proximity_placement_group_id: Optional[str] = None,
+ kubelet_config: Optional["_models.KubeletConfig"] = None,
+ linux_os_config: Optional["_models.LinuxOSConfig"] = None,
+ enable_encryption_at_host: Optional[bool] = None,
+ enable_ultra_ssd: Optional[bool] = None,
+ enable_fips: Optional[bool] = None,
+ gpu_instance_profile: Optional[Union[str, "_models.GPUInstanceProfile"]] = None,
+ creation_data: Optional["_models.CreationData"] = None,
+ capacity_reservation_group_id: Optional[str] = None,
+ host_group_id: Optional[str] = None,
+ network_profile: Optional["_models.AgentPoolNetworkProfile"] = None,
+ windows_profile: Optional["_models.AgentPoolWindowsProfile"] = None,
+ security_profile: Optional["_models.AgentPoolSecurityProfile"] = None,
+ gpu_profile: Optional["_models.GPUProfile"] = None,
+ gateway_profile: Optional["_models.AgentPoolGatewayProfile"] = None,
+ virtual_machines_profile: Optional["_models.VirtualMachinesProfile"] = None,
+ virtual_machine_nodes_status: Optional[list["_models.VirtualMachineNodes"]] = None,
+ status: Optional["_models.AgentPoolStatus"] = None,
+ local_dns_profile: Optional["_models.LocalDNSProfile"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword count: Number of agents (VMs) to host docker containers. Allowed values must be in the
+ range of 0 to 1000 (inclusive) for user pools and in the range of 1 to 1000 (inclusive) for
+ system pools. The default value is 1.
+ :paramtype count: int
+ :keyword vm_size: The size of the agent pool VMs. VM size availability varies by region. If a
+ node contains insufficient compute resources (memory, cpu, etc) pods might fail to run
+ correctly. For more details on restricted VM sizes, see:
+ https://docs.microsoft.com/azure/aks/quotas-skus-regions.
+ :paramtype vm_size: str
+ :keyword os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every
+ machine in the master/agent pool. If you specify 0, it will apply the default osDisk size
+ according to the vmSize specified.
+ :paramtype os_disk_size_gb: int
+ :keyword os_disk_type: The OS disk type to be used for machines in the agent pool. The default
+ is 'Ephemeral' if the VM supports it and has a cache disk larger than the requested
+ OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation. For more
+ information see `Ephemeral OS
+ <https://docs.microsoft.com/azure/aks/cluster-configuration#ephemeral-os>`_. Known values are:
+ "Managed" and "Ephemeral".
+ :paramtype os_disk_type: str or ~azure.mgmt.containerservice.models.OSDiskType
+ :keyword kubelet_disk_type: Determines the placement of emptyDir volumes, container runtime
+ data root, and Kubelet ephemeral storage. Known values are: "OS" and "Temporary".
+ :paramtype kubelet_disk_type: str or ~azure.mgmt.containerservice.models.KubeletDiskType
+ :keyword workload_runtime: Determines the type of workload a node can run. Known values are:
+ "OCIContainer", "WasmWasi", and "KataVmIsolation".
+ :paramtype workload_runtime: str or ~azure.mgmt.containerservice.models.WorkloadRuntime
+ :keyword message_of_the_day: Message of the day for Linux nodes, base64-encoded. A
+ base64-encoded string which will be written to /etc/motd after decoding. This allows
+ customization of the message of the day for Linux nodes. It must not be specified for Windows
+ nodes. It must be a static string (i.e., will be printed raw and not be executed as a script).
+ :paramtype message_of_the_day: str
+ :keyword vnet_subnet_id: The ID of the subnet which agent pool nodes and optionally pods will
+ join on startup. If this is not specified, a VNET and subnet will be generated and used. If no
+ podSubnetID is specified, this applies to nodes and pods, otherwise it applies to just nodes.
+ This is of the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}.
+ :paramtype vnet_subnet_id: str
+ :keyword pod_subnet_id: The ID of the subnet which pods will join when launched. If omitted,
+ pod IPs are statically assigned on the node subnet (see vnetSubnetID for more details). This is
+ of the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}.
+ :paramtype pod_subnet_id: str
+ :keyword pod_ip_allocation_mode: Pod IP Allocation Mode. The IP allocation mode for pods in the
+ agent pool. Must be used with podSubnetId. The default is 'DynamicIndividual'. Known values
+ are: "DynamicIndividual" and "StaticBlock".
+ :paramtype pod_ip_allocation_mode: str or
+ ~azure.mgmt.containerservice.models.PodIPAllocationMode
+ :keyword max_pods: The maximum number of pods that can run on a node.
+ :paramtype max_pods: int
+ :keyword os_type: The operating system type. The default is Linux. Known values are: "Linux"
+ and "Windows".
+ :paramtype os_type: str or ~azure.mgmt.containerservice.models.OSType
+ :keyword os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType
+ is Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >=
+ 1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "AzureLinux3",
+ "CBLMariner", "Windows2019", "Windows2022", "Ubuntu2204", and "Ubuntu2404".
+ :paramtype os_sku: str or ~azure.mgmt.containerservice.models.OSSKU
+ :keyword max_count: The maximum number of nodes for auto-scaling.
+ :paramtype max_count: int
+ :keyword min_count: The minimum number of nodes for auto-scaling.
+ :paramtype min_count: int
+ :keyword enable_auto_scaling: Whether to enable auto-scaler.
+ :paramtype enable_auto_scaling: bool
+ :keyword scale_down_mode: The scale down mode to use when scaling the Agent Pool. This also
+ effects the cluster autoscaler behavior. If not specified, it defaults to Delete. Known values
+ are: "Delete" and "Deallocate".
+ :paramtype scale_down_mode: str or ~azure.mgmt.containerservice.models.ScaleDownMode
+ :keyword type: The type of Agent Pool. Known values are: "VirtualMachineScaleSets",
+ "AvailabilitySet", and "VirtualMachines".
+ :paramtype type: str or ~azure.mgmt.containerservice.models.AgentPoolType
+ :keyword mode: The mode of an agent pool. A cluster must have at least one 'System' Agent Pool
+ at all times. For additional information on agent pool restrictions and best practices, see:
+ https://docs.microsoft.com/azure/aks/use-system-pools. Known values are: "System", "User", and
+ "Gateway".
+ :paramtype mode: str or ~azure.mgmt.containerservice.models.AgentPoolMode
+ :keyword orchestrator_version: The version of Kubernetes specified by the user. Both patch
+ version <major.minor.patch> (e.g. 1.20.13) and <major.minor> (e.g. 1.20) are supported. When
+ <major.minor> is specified, the latest supported GA patch version is chosen automatically.
+ Updating the cluster with the same <major.minor> once it has been created (e.g. 1.14.x -> 1.14)
+ will not trigger an upgrade, even if a newer patch version is available. As a best practice,
+ you should upgrade all node pools in an AKS cluster to the same Kubernetes version. The node
+ pool version must have the same major version as the control plane. The node pool minor version
+ must be within two minor versions of the control plane version. The node pool version cannot be
+ greater than the control plane version. For more information see `upgrading a node pool
+ <https://docs.microsoft.com/azure/aks/use-multiple-node-pools#upgrade-a-node-pool>`_.
+ :paramtype orchestrator_version: str
+ :keyword upgrade_settings: Settings for upgrading the agentpool.
+ :paramtype upgrade_settings: ~azure.mgmt.containerservice.models.AgentPoolUpgradeSettings
+ :keyword power_state: Whether the Agent Pool is running or stopped. When an Agent Pool is first
+ created it is initially Running. The Agent Pool can be stopped by setting this field to
+ Stopped. A stopped Agent Pool stops all of its VMs and does not accrue billing charges. An
+ Agent Pool can only be stopped if it is Running and provisioning state is Succeeded.
+ :paramtype power_state: ~azure.mgmt.containerservice.models.PowerState
+ :keyword availability_zones: The list of Availability zones to use for nodes. This can only be
+ specified if the AgentPoolType property is 'VirtualMachineScaleSets'.
+ :paramtype availability_zones: list[str]
+ :keyword enable_node_public_ip: Whether each node is allocated its own public IP. Some
+ scenarios may require nodes in a node pool to receive their own dedicated public IP addresses.
+ A common scenario is for gaming workloads, where a console needs to make a direct connection to
+ a cloud virtual machine to minimize hops. For more information see `assigning a public IP per
+ node
+ <https://docs.microsoft.com/azure/aks/use-multiple-node-pools#assign-a-public-ip-per-node-for-your-node-pools>`_.
+ The default is false.
+ :paramtype enable_node_public_ip: bool
+ :keyword node_public_ip_prefix_id: The public IP prefix ID which VM nodes should use IPs from.
+ This is of the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIPPrefixName}.
+ :paramtype node_public_ip_prefix_id: str
+ :keyword scale_set_priority: The Virtual Machine Scale Set priority. If not specified, the
+ default is 'Regular'. Known values are: "Spot" and "Regular".
+ :paramtype scale_set_priority: str or ~azure.mgmt.containerservice.models.ScaleSetPriority
+ :keyword scale_set_eviction_policy: The Virtual Machine Scale Set eviction policy to use. This
+ cannot be specified unless the scaleSetPriority is 'Spot'. If not specified, the default is
+ 'Delete'. Known values are: "Delete" and "Deallocate".
+ :paramtype scale_set_eviction_policy: str or
+ ~azure.mgmt.containerservice.models.ScaleSetEvictionPolicy
+ :keyword spot_max_price: The max price (in US Dollars) you are willing to pay for spot
+ instances. Possible values are any decimal value greater than zero or -1 which indicates
+ default price to be up-to on-demand. Possible values are any decimal value greater than zero or
+ -1 which indicates the willingness to pay any on-demand price. For more details on spot
+ pricing, see `spot VMs pricing
+ <https://azure.microsoft.com/pricing/spot/>`_.
+ :paramtype spot_max_price: float
+ :keyword tags: The tags to be persisted on the agent pool virtual machine scale set.
+ :paramtype tags: dict[str, str]
+ :keyword node_labels: The node labels to be persisted across all nodes in agent pool.
+ :paramtype node_labels: dict[str, str]
+ :keyword node_taints: The taints added to new nodes during node pool create and scale. For
+ example, key=value:NoSchedule.
+ :paramtype node_taints: list[str]
+ :keyword proximity_placement_group_id: The ID for Proximity Placement Group.
+ :paramtype proximity_placement_group_id: str
+ :keyword kubelet_config: The Kubelet configuration on the agent pool nodes.
+ :paramtype kubelet_config: ~azure.mgmt.containerservice.models.KubeletConfig
+ :keyword linux_os_config: The OS configuration of Linux agent nodes.
+ :paramtype linux_os_config: ~azure.mgmt.containerservice.models.LinuxOSConfig
+ :keyword enable_encryption_at_host: Whether to enable host based OS and data drive encryption.
+ This is only supported on certain VM sizes and in certain Azure regions. For more information,
+ see: https://docs.microsoft.com/azure/aks/enable-host-encryption.
+ :paramtype enable_encryption_at_host: bool
+ :keyword enable_ultra_ssd: Whether to enable UltraSSD.
+ :paramtype enable_ultra_ssd: bool
+ :keyword enable_fips: Whether to use a FIPS-enabled OS. See `Add a FIPS-enabled node pool
+ <https://docs.microsoft.com/azure/aks/use-multiple-node-pools#add-a-fips-enabled-node-pool-preview>`_
+ for more details.
+ :paramtype enable_fips: bool
+ :keyword gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance
+ profile for supported GPU VM SKU. Known values are: "MIG1g", "MIG2g", "MIG3g", "MIG4g", and
+ "MIG7g".
+ :paramtype gpu_instance_profile: str or ~azure.mgmt.containerservice.models.GPUInstanceProfile
+ :keyword creation_data: CreationData to be used to specify the source Snapshot ID if the node
+ pool will be created/upgraded using a snapshot.
+ :paramtype creation_data: ~azure.mgmt.containerservice.models.CreationData
+ :keyword capacity_reservation_group_id: AKS will associate the specified agent pool with the
+ Capacity Reservation Group.
+ :paramtype capacity_reservation_group_id: str
+ :keyword host_group_id: The fully qualified resource ID of the Dedicated Host Group to
+ provision virtual machines from, used only in creation scenario and not allowed to changed once
+ set. This is of the form:
+ /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}.
+ For more information see `Azure dedicated hosts
+ <https://docs.microsoft.com/azure/virtual-machines/dedicated-hosts>`_.
+ :paramtype host_group_id: str
+ :keyword network_profile: Network-related settings of an agent pool.
+ :paramtype network_profile: ~azure.mgmt.containerservice.models.AgentPoolNetworkProfile
+ :keyword windows_profile: The Windows agent pool's specific profile.
+ :paramtype windows_profile: ~azure.mgmt.containerservice.models.AgentPoolWindowsProfile
+ :keyword security_profile: The security settings of an agent pool.
+ :paramtype security_profile: ~azure.mgmt.containerservice.models.AgentPoolSecurityProfile
+ :keyword gpu_profile: GPU settings for the Agent Pool.
+ :paramtype gpu_profile: ~azure.mgmt.containerservice.models.GPUProfile
+ :keyword gateway_profile: Profile specific to a managed agent pool in Gateway mode. This field
+ cannot be set if agent pool mode is not Gateway.
+ :paramtype gateway_profile: ~azure.mgmt.containerservice.models.AgentPoolGatewayProfile
+ :keyword virtual_machines_profile: Specifications on VirtualMachines agent pool.
+ :paramtype virtual_machines_profile: ~azure.mgmt.containerservice.models.VirtualMachinesProfile
+ :keyword virtual_machine_nodes_status: The status of nodes in a VirtualMachines agent pool.
+ :paramtype virtual_machine_nodes_status:
+ list[~azure.mgmt.containerservice.models.VirtualMachineNodes]
+ :keyword status: Contains read-only information about the Agent Pool.
+ :paramtype status: ~azure.mgmt.containerservice.models.AgentPoolStatus
+ :keyword local_dns_profile: Configures the per-node local DNS, with VnetDNS and KubeDNS
+ overrides. LocalDNS helps improve performance and reliability of DNS resolution in an AKS
+ cluster. For more details see aka.ms/aks/localdns.
+ :paramtype local_dns_profile: ~azure.mgmt.containerservice.models.LocalDNSProfile
+ :keyword name: Unique name of the agent pool profile in the context of the subscription and
+ resource group. Windows agent pool names must be 6 characters or less. Required.
+ :paramtype name: str
+ """
+ super().__init__(
+ count=count,
+ vm_size=vm_size,
+ os_disk_size_gb=os_disk_size_gb,
+ os_disk_type=os_disk_type,
+ kubelet_disk_type=kubelet_disk_type,
+ workload_runtime=workload_runtime,
+ message_of_the_day=message_of_the_day,
+ vnet_subnet_id=vnet_subnet_id,
+ pod_subnet_id=pod_subnet_id,
+ pod_ip_allocation_mode=pod_ip_allocation_mode,
+ max_pods=max_pods,
+ os_type=os_type,
+ os_sku=os_sku,
+ max_count=max_count,
+ min_count=min_count,
+ enable_auto_scaling=enable_auto_scaling,
+ scale_down_mode=scale_down_mode,
+ type=type,
+ mode=mode,
+ orchestrator_version=orchestrator_version,
+ upgrade_settings=upgrade_settings,
+ power_state=power_state,
+ availability_zones=availability_zones,
+ enable_node_public_ip=enable_node_public_ip,
+ node_public_ip_prefix_id=node_public_ip_prefix_id,
+ scale_set_priority=scale_set_priority,
+ scale_set_eviction_policy=scale_set_eviction_policy,
+ spot_max_price=spot_max_price,
+ tags=tags,
+ node_labels=node_labels,
+ node_taints=node_taints,
+ proximity_placement_group_id=proximity_placement_group_id,
+ kubelet_config=kubelet_config,
+ linux_os_config=linux_os_config,
+ enable_encryption_at_host=enable_encryption_at_host,
+ enable_ultra_ssd=enable_ultra_ssd,
+ enable_fips=enable_fips,
+ gpu_instance_profile=gpu_instance_profile,
+ creation_data=creation_data,
+ capacity_reservation_group_id=capacity_reservation_group_id,
+ host_group_id=host_group_id,
+ network_profile=network_profile,
+ windows_profile=windows_profile,
+ security_profile=security_profile,
+ gpu_profile=gpu_profile,
+ gateway_profile=gateway_profile,
+ virtual_machines_profile=virtual_machines_profile,
+ virtual_machine_nodes_status=virtual_machine_nodes_status,
+ status=status,
+ local_dns_profile=local_dns_profile,
+ **kwargs
+ )
+ self.name = name
+
+
+class ManagedClusterAIToolchainOperatorProfile(_serialization.Model):
+ """When enabling the operator, a set of AKS managed CRDs and controllers will be installed in the
+ cluster. The operator automates the deployment of OSS models for inference and/or training
+ purposes. It provides a set of preset models and enables distributed inference against them.
+
+ :ivar enabled: Whether to enable AI toolchain operator to the cluster. Indicates if AI
+ toolchain operator enabled or not.
+ :vartype enabled: bool
+ """
+
+ _attribute_map = {
+ "enabled": {"key": "enabled", "type": "bool"},
+ }
+
+ def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None:
+ """
+ :keyword enabled: Whether to enable AI toolchain operator to the cluster. Indicates if AI
+ toolchain operator enabled or not.
+ :paramtype enabled: bool
+ """
+ super().__init__(**kwargs)
+ self.enabled = enabled
+
+
+class ManagedClusterAPIServerAccessProfile(_serialization.Model):
+ """Access profile for managed cluster API server.
+
+ :ivar authorized_ip_ranges: The IP ranges authorized to access the Kubernetes API server. IP
+ ranges are specified in CIDR format, e.g. 137.117.106.88/29. This feature is not compatible
+ with clusters that use Public IP Per Node, or clusters that are using a Basic Load Balancer.
+ For more information see `API server authorized IP ranges
+ `_.
+ :vartype authorized_ip_ranges: list[str]
+ :ivar enable_private_cluster: Whether to create the cluster as a private cluster or not. For
+ more details, see `Creating a private AKS cluster
+ `_.
+ :vartype enable_private_cluster: bool
+ :ivar private_dns_zone: The private DNS zone mode for the cluster. The default is System. For
+ more details see `configure private DNS zone
+ `_. Allowed
+ values are 'system' and 'none'.
+ :vartype private_dns_zone: str
+ :ivar enable_private_cluster_public_fqdn: Whether to create additional public FQDN for private
+ cluster or not.
+ :vartype enable_private_cluster_public_fqdn: bool
+ :ivar disable_run_command: Whether to disable run command for the cluster or not.
+ :vartype disable_run_command: bool
+ :ivar enable_vnet_integration: Whether to enable apiserver vnet integration for the cluster or
+ not. See aka.ms/AksVnetIntegration for more details.
+ :vartype enable_vnet_integration: bool
+ :ivar subnet_id: The subnet to be used when apiserver vnet integration is enabled. It is
+ required when creating a new cluster with BYO Vnet, or when updating an existing cluster to
+ enable apiserver vnet integration.
+ :vartype subnet_id: str
+ """
+
+ _attribute_map = {
+ "authorized_ip_ranges": {"key": "authorizedIPRanges", "type": "[str]"},
+ "enable_private_cluster": {"key": "enablePrivateCluster", "type": "bool"},
+ "private_dns_zone": {"key": "privateDNSZone", "type": "str"},
+ "enable_private_cluster_public_fqdn": {"key": "enablePrivateClusterPublicFQDN", "type": "bool"},
+ "disable_run_command": {"key": "disableRunCommand", "type": "bool"},
+ "enable_vnet_integration": {"key": "enableVnetIntegration", "type": "bool"},
+ "subnet_id": {"key": "subnetId", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ authorized_ip_ranges: Optional[list[str]] = None,
+ enable_private_cluster: Optional[bool] = None,
+ private_dns_zone: Optional[str] = None,
+ enable_private_cluster_public_fqdn: Optional[bool] = None,
+ disable_run_command: Optional[bool] = None,
+ enable_vnet_integration: Optional[bool] = None,
+ subnet_id: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword authorized_ip_ranges: The IP ranges authorized to access the Kubernetes API server. IP
+ ranges are specified in CIDR format, e.g. 137.117.106.88/29. This feature is not compatible
+ with clusters that use Public IP Per Node, or clusters that are using a Basic Load Balancer.
+ For more information see `API server authorized IP ranges
+ `_.
+ :paramtype authorized_ip_ranges: list[str]
+ :keyword enable_private_cluster: Whether to create the cluster as a private cluster or not. For
+ more details, see `Creating a private AKS cluster
+ `_.
+ :paramtype enable_private_cluster: bool
+ :keyword private_dns_zone: The private DNS zone mode for the cluster. The default is System.
+ For more details see `configure private DNS zone
+ `_. Allowed
+ values are 'system' and 'none'.
+ :paramtype private_dns_zone: str
+ :keyword enable_private_cluster_public_fqdn: Whether to create additional public FQDN for
+ private cluster or not.
+ :paramtype enable_private_cluster_public_fqdn: bool
+ :keyword disable_run_command: Whether to disable run command for the cluster or not.
+ :paramtype disable_run_command: bool
+ :keyword enable_vnet_integration: Whether to enable apiserver vnet integration for the cluster
+ or not. See aka.ms/AksVnetIntegration for more details.
+ :paramtype enable_vnet_integration: bool
+ :keyword subnet_id: The subnet to be used when apiserver vnet integration is enabled. It is
+ required when creating a new cluster with BYO Vnet, or when updating an existing cluster to
+ enable apiserver vnet integration.
+ :paramtype subnet_id: str
+ """
+ super().__init__(**kwargs)
+ self.authorized_ip_ranges = authorized_ip_ranges
+ self.enable_private_cluster = enable_private_cluster
+ self.private_dns_zone = private_dns_zone
+ self.enable_private_cluster_public_fqdn = enable_private_cluster_public_fqdn
+ self.disable_run_command = disable_run_command
+ self.enable_vnet_integration = enable_vnet_integration
+ self.subnet_id = subnet_id
+
+
+class ManagedClusterAutoUpgradeProfile(_serialization.Model):
+ """Auto upgrade profile for a managed cluster.
+
+ :ivar upgrade_channel: The upgrade channel for auto upgrade. The default is 'none'. For more
+ information see `setting the AKS cluster auto-upgrade channel
+ `_. Known values
+ are: "rapid", "stable", "patch", "node-image", and "none".
+ :vartype upgrade_channel: str or ~azure.mgmt.containerservice.models.UpgradeChannel
+ :ivar node_os_upgrade_channel: Node OS Upgrade Channel. Manner in which the OS on your nodes is
+ updated. The default is NodeImage. Known values are: "None", "Unmanaged", "NodeImage", and
+ "SecurityPatch".
+ :vartype node_os_upgrade_channel: str or
+ ~azure.mgmt.containerservice.models.NodeOSUpgradeChannel
+ """
+
+ _attribute_map = {
+ "upgrade_channel": {"key": "upgradeChannel", "type": "str"},
+ "node_os_upgrade_channel": {"key": "nodeOSUpgradeChannel", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ upgrade_channel: Optional[Union[str, "_models.UpgradeChannel"]] = None,
+ node_os_upgrade_channel: Optional[Union[str, "_models.NodeOSUpgradeChannel"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword upgrade_channel: The upgrade channel for auto upgrade. The default is 'none'. For more
+ information see `setting the AKS cluster auto-upgrade channel
+ `_. Known values
+ are: "rapid", "stable", "patch", "node-image", and "none".
+ :paramtype upgrade_channel: str or ~azure.mgmt.containerservice.models.UpgradeChannel
+ :keyword node_os_upgrade_channel: Node OS Upgrade Channel. Manner in which the OS on your nodes
+ is updated. The default is NodeImage. Known values are: "None", "Unmanaged", "NodeImage", and
+ "SecurityPatch".
+ :paramtype node_os_upgrade_channel: str or
+ ~azure.mgmt.containerservice.models.NodeOSUpgradeChannel
+ """
+ super().__init__(**kwargs)
+ self.upgrade_channel = upgrade_channel
+ self.node_os_upgrade_channel = node_os_upgrade_channel
+
+
+class ManagedClusterAzureMonitorProfile(_serialization.Model):
+ """Azure Monitor addon profiles for monitoring the managed cluster.
+
+ :ivar metrics: Metrics profile for the Azure Monitor managed service for Prometheus addon.
+ Collect out-of-the-box Kubernetes infrastructure metrics to send to an Azure Monitor Workspace
+ and configure additional scraping for custom targets. See aka.ms/AzureManagedPrometheus for an
+ overview.
+ :vartype metrics: ~azure.mgmt.containerservice.models.ManagedClusterAzureMonitorProfileMetrics
+ """
+
+ _attribute_map = {
+ "metrics": {"key": "metrics", "type": "ManagedClusterAzureMonitorProfileMetrics"},
+ }
+
+ def __init__(
+ self, *, metrics: Optional["_models.ManagedClusterAzureMonitorProfileMetrics"] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword metrics: Metrics profile for the Azure Monitor managed service for Prometheus addon.
+ Collect out-of-the-box Kubernetes infrastructure metrics to send to an Azure Monitor Workspace
+ and configure additional scraping for custom targets. See aka.ms/AzureManagedPrometheus for an
+ overview.
+ :paramtype metrics:
+ ~azure.mgmt.containerservice.models.ManagedClusterAzureMonitorProfileMetrics
+ """
+ super().__init__(**kwargs)
+ self.metrics = metrics
+
+
+class ManagedClusterAzureMonitorProfileKubeStateMetrics(_serialization.Model): # pylint: disable=name-too-long
+ """Kube State Metrics profile for the Azure Managed Prometheus addon. These optional settings are
+ for the kube-state-metrics pod that is deployed with the addon. See
+ aka.ms/AzureManagedPrometheus-optional-parameters for details.
+
+ :ivar metric_labels_allowlist: Comma-separated list of additional Kubernetes label keys that
+ will be used in the resource's labels metric (Example:
+ 'namespaces=[k8s-label-1,k8s-label-n,...],pods=[app],...'). By default the metric contains only
+ resource name and namespace labels.
+ :vartype metric_labels_allowlist: str
+ :ivar metric_annotations_allow_list: Comma-separated list of Kubernetes annotation keys that
+ will be used in the resource's labels metric (Example:
+ 'namespaces=[kubernetes.io/team,...],pods=[kubernetes.io/team],...'). By default the metric
+ contains only resource name and namespace labels.
+ :vartype metric_annotations_allow_list: str
+ """
+
+ _attribute_map = {
+ "metric_labels_allowlist": {"key": "metricLabelsAllowlist", "type": "str"},
+ "metric_annotations_allow_list": {"key": "metricAnnotationsAllowList", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ metric_labels_allowlist: Optional[str] = None,
+ metric_annotations_allow_list: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword metric_labels_allowlist: Comma-separated list of additional Kubernetes label keys that
+ will be used in the resource's labels metric (Example:
+ 'namespaces=[k8s-label-1,k8s-label-n,...],pods=[app],...'). By default the metric contains only
+ resource name and namespace labels.
+ :paramtype metric_labels_allowlist: str
+ :keyword metric_annotations_allow_list: Comma-separated list of Kubernetes annotation keys that
+ will be used in the resource's labels metric (Example:
+ 'namespaces=[kubernetes.io/team,...],pods=[kubernetes.io/team],...'). By default the metric
+ contains only resource name and namespace labels.
+ :paramtype metric_annotations_allow_list: str
+ """
+ super().__init__(**kwargs)
+ self.metric_labels_allowlist = metric_labels_allowlist
+ self.metric_annotations_allow_list = metric_annotations_allow_list
+
+
+class ManagedClusterAzureMonitorProfileMetrics(_serialization.Model):
+ """Metrics profile for the Azure Monitor managed service for Prometheus addon. Collect
+ out-of-the-box Kubernetes infrastructure metrics to send to an Azure Monitor Workspace and
+ configure additional scraping for custom targets. See aka.ms/AzureManagedPrometheus for an
+ overview.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar enabled: Whether to enable or disable the Azure Managed Prometheus addon for Prometheus
+ monitoring. See aka.ms/AzureManagedPrometheus-aks-enable for details on enabling and disabling.
+ Required.
+ :vartype enabled: bool
+ :ivar kube_state_metrics: Kube State Metrics profile for the Azure Managed Prometheus addon.
+ These optional settings are for the kube-state-metrics pod that is deployed with the addon. See
+ aka.ms/AzureManagedPrometheus-optional-parameters for details.
+ :vartype kube_state_metrics:
+ ~azure.mgmt.containerservice.models.ManagedClusterAzureMonitorProfileKubeStateMetrics
+ """
+
+ _validation = {
+ "enabled": {"required": True},
+ }
+
+ _attribute_map = {
+ "enabled": {"key": "enabled", "type": "bool"},
+ "kube_state_metrics": {"key": "kubeStateMetrics", "type": "ManagedClusterAzureMonitorProfileKubeStateMetrics"},
+ }
+
+ def __init__(
+ self,
+ *,
+ enabled: bool,
+ kube_state_metrics: Optional["_models.ManagedClusterAzureMonitorProfileKubeStateMetrics"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword enabled: Whether to enable or disable the Azure Managed Prometheus addon for
+ Prometheus monitoring. See aka.ms/AzureManagedPrometheus-aks-enable for details on enabling and
+ disabling. Required.
+ :paramtype enabled: bool
+ :keyword kube_state_metrics: Kube State Metrics profile for the Azure Managed Prometheus addon.
+ These optional settings are for the kube-state-metrics pod that is deployed with the addon. See
+ aka.ms/AzureManagedPrometheus-optional-parameters for details.
+ :paramtype kube_state_metrics:
+ ~azure.mgmt.containerservice.models.ManagedClusterAzureMonitorProfileKubeStateMetrics
+ """
+ super().__init__(**kwargs)
+ self.enabled = enabled
+ self.kube_state_metrics = kube_state_metrics
+
+
+class ManagedClusterBootstrapProfile(_serialization.Model):
+ """The bootstrap profile.
+
+ :ivar artifact_source: The artifact source. The source where the artifacts are downloaded from.
+ Known values are: "Cache" and "Direct".
+ :vartype artifact_source: str or ~azure.mgmt.containerservice.models.ArtifactSource
+ :ivar container_registry_id: The resource Id of Azure Container Registry. The registry must
+ have private network access, premium SKU and zone redundancy.
+ :vartype container_registry_id: str
+ """
+
+ _attribute_map = {
+ "artifact_source": {"key": "artifactSource", "type": "str"},
+ "container_registry_id": {"key": "containerRegistryId", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ artifact_source: Union[str, "_models.ArtifactSource"] = "Direct",
+ container_registry_id: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword artifact_source: The artifact source. The source where the artifacts are downloaded
+ from. Known values are: "Cache" and "Direct".
+ :paramtype artifact_source: str or ~azure.mgmt.containerservice.models.ArtifactSource
+ :keyword container_registry_id: The resource Id of Azure Container Registry. The registry must
+ have private network access, premium SKU and zone redundancy.
+ :paramtype container_registry_id: str
+ """
+ super().__init__(**kwargs)
+ self.artifact_source = artifact_source
+ self.container_registry_id = container_registry_id
+
+
+class ManagedClusterCostAnalysis(_serialization.Model):
+ """The cost analysis configuration for the cluster.
+
+ :ivar enabled: Whether to enable cost analysis. The Managed Cluster sku.tier must be set to
+ 'Standard' or 'Premium' to enable this feature. Enabling this will add Kubernetes Namespace and
+ Deployment details to the Cost Analysis views in the Azure portal. If not specified, the
+ default is false. For more information see aka.ms/aks/docs/cost-analysis.
+ :vartype enabled: bool
+ """
+
+ _attribute_map = {
+ "enabled": {"key": "enabled", "type": "bool"},
+ }
+
+ def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None:
+ """
+ :keyword enabled: Whether to enable cost analysis. The Managed Cluster sku.tier must be set to
+ 'Standard' or 'Premium' to enable this feature. Enabling this will add Kubernetes Namespace and
+ Deployment details to the Cost Analysis views in the Azure portal. If not specified, the
+ default is false. For more information see aka.ms/aks/docs/cost-analysis.
+ :paramtype enabled: bool
+ """
+ super().__init__(**kwargs)
+ self.enabled = enabled
+
+
+class ManagedClusterHTTPProxyConfig(_serialization.Model):
+ """Cluster HTTP proxy configuration.
+
+ :ivar http_proxy: The HTTP proxy server endpoint to use.
+ :vartype http_proxy: str
+ :ivar https_proxy: The HTTPS proxy server endpoint to use.
+ :vartype https_proxy: str
+ :ivar no_proxy: The endpoints that should not go through proxy.
+ :vartype no_proxy: list[str]
+ :ivar trusted_ca: Alternative CA cert to use for connecting to proxy servers.
+ :vartype trusted_ca: str
+ """
+
+ _attribute_map = {
+ "http_proxy": {"key": "httpProxy", "type": "str"},
+ "https_proxy": {"key": "httpsProxy", "type": "str"},
+ "no_proxy": {"key": "noProxy", "type": "[str]"},
+ "trusted_ca": {"key": "trustedCa", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ http_proxy: Optional[str] = None,
+ https_proxy: Optional[str] = None,
+ no_proxy: Optional[list[str]] = None,
+ trusted_ca: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword http_proxy: The HTTP proxy server endpoint to use.
+ :paramtype http_proxy: str
+ :keyword https_proxy: The HTTPS proxy server endpoint to use.
+ :paramtype https_proxy: str
+ :keyword no_proxy: The endpoints that should not go through proxy.
+ :paramtype no_proxy: list[str]
+ :keyword trusted_ca: Alternative CA cert to use for connecting to proxy servers.
+ :paramtype trusted_ca: str
+ """
+ super().__init__(**kwargs)
+ self.http_proxy = http_proxy
+ self.https_proxy = https_proxy
+ self.no_proxy = no_proxy
+ self.trusted_ca = trusted_ca
+
+
+class ManagedClusterIdentity(_serialization.Model):
+ """Identity for the managed cluster.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar principal_id: The principal id of the system assigned identity which is used by master
+ components.
+ :vartype principal_id: str
+ :ivar tenant_id: The tenant id of the system assigned identity which is used by master
+ components.
+ :vartype tenant_id: str
+ :ivar type: The type of identity used for the managed cluster. For more information see `use
+ managed identities in AKS `_. Known
+ values are: "SystemAssigned", "UserAssigned", and "None".
+ :vartype type: str or ~azure.mgmt.containerservice.models.ResourceIdentityType
+ :ivar delegated_resources: The delegated identity resources assigned to this managed cluster.
+ This can only be set by another Azure Resource Provider, and managed cluster only accept one
+ delegated identity resource. Internal use only.
+ :vartype delegated_resources: dict[str, ~azure.mgmt.containerservice.models.DelegatedResource]
+ :ivar user_assigned_identities: The user identity associated with the managed cluster. This
+ identity will be used in control plane. Only one user assigned identity is allowed. The keys
+ must be ARM resource IDs in the form:
+ '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
+ :vartype user_assigned_identities: dict[str,
+ ~azure.mgmt.containerservice.models.ManagedServiceIdentityUserAssignedIdentitiesValue]
+ """
+
+ _validation = {
+ "principal_id": {"readonly": True},
+ "tenant_id": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "principal_id": {"key": "principalId", "type": "str"},
+ "tenant_id": {"key": "tenantId", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "delegated_resources": {"key": "delegatedResources", "type": "{DelegatedResource}"},
+ "user_assigned_identities": {
+ "key": "userAssignedIdentities",
+ "type": "{ManagedServiceIdentityUserAssignedIdentitiesValue}",
+ },
+ }
+
+ def __init__(
+ self,
+ *,
+ type: Optional[Union[str, "_models.ResourceIdentityType"]] = None,
+ delegated_resources: Optional[dict[str, "_models.DelegatedResource"]] = None,
+ user_assigned_identities: Optional[
+ dict[str, "_models.ManagedServiceIdentityUserAssignedIdentitiesValue"]
+ ] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword type: The type of identity used for the managed cluster. For more information see `use
+ managed identities in AKS `_. Known
+ values are: "SystemAssigned", "UserAssigned", and "None".
+ :paramtype type: str or ~azure.mgmt.containerservice.models.ResourceIdentityType
+ :keyword delegated_resources: The delegated identity resources assigned to this managed
+ cluster. This can only be set by another Azure Resource Provider, and managed cluster only
+ accept one delegated identity resource. Internal use only.
+ :paramtype delegated_resources: dict[str,
+ ~azure.mgmt.containerservice.models.DelegatedResource]
+ :keyword user_assigned_identities: The user identity associated with the managed cluster. This
+ identity will be used in control plane. Only one user assigned identity is allowed. The keys
+ must be ARM resource IDs in the form:
+ '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
+ :paramtype user_assigned_identities: dict[str,
+ ~azure.mgmt.containerservice.models.ManagedServiceIdentityUserAssignedIdentitiesValue]
+ """
+ super().__init__(**kwargs)
+ self.principal_id: Optional[str] = None
+ self.tenant_id: Optional[str] = None
+ self.type = type
+ self.delegated_resources = delegated_resources
+ self.user_assigned_identities = user_assigned_identities
+
+
+class ManagedClusterIngressProfile(_serialization.Model):
+ """Ingress profile for the container service cluster.
+
+ :ivar web_app_routing: App Routing settings for the ingress profile. You can find an overview
+ and onboarding guide for this feature at
+ https://learn.microsoft.com/en-us/azure/aks/app-routing?tabs=default%2Cdeploy-app-default.
+ :vartype web_app_routing:
+ ~azure.mgmt.containerservice.models.ManagedClusterIngressProfileWebAppRouting
+ """
+
+ _attribute_map = {
+ "web_app_routing": {"key": "webAppRouting", "type": "ManagedClusterIngressProfileWebAppRouting"},
+ }
+
+ def __init__(
+ self, *, web_app_routing: Optional["_models.ManagedClusterIngressProfileWebAppRouting"] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword web_app_routing: App Routing settings for the ingress profile. You can find an
+ overview and onboarding guide for this feature at
+ https://learn.microsoft.com/en-us/azure/aks/app-routing?tabs=default%2Cdeploy-app-default.
+ :paramtype web_app_routing:
+ ~azure.mgmt.containerservice.models.ManagedClusterIngressProfileWebAppRouting
+ """
+ super().__init__(**kwargs)
+ self.web_app_routing = web_app_routing
+
+
+class ManagedClusterIngressProfileNginx(_serialization.Model):
+ """ManagedClusterIngressProfileNginx.
+
+ :ivar default_ingress_controller_type: Ingress type for the default NginxIngressController
+ custom resource. Known values are: "AnnotationControlled", "External", "Internal", and "None".
+ :vartype default_ingress_controller_type: str or
+ ~azure.mgmt.containerservice.models.NginxIngressControllerType
+ """
+
+ _attribute_map = {
+ "default_ingress_controller_type": {"key": "defaultIngressControllerType", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ default_ingress_controller_type: Optional[Union[str, "_models.NginxIngressControllerType"]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword default_ingress_controller_type: Ingress type for the default NginxIngressController
+ custom resource. Known values are: "AnnotationControlled", "External", "Internal", and "None".
+ :paramtype default_ingress_controller_type: str or
+ ~azure.mgmt.containerservice.models.NginxIngressControllerType
+ """
+ super().__init__(**kwargs)
+ self.default_ingress_controller_type = default_ingress_controller_type
+
+
+class ManagedClusterIngressProfileWebAppRouting(_serialization.Model): # pylint: disable=name-too-long
+ """Application Routing add-on settings for the ingress profile.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar enabled: Whether to enable the Application Routing add-on.
+ :vartype enabled: bool
+ :ivar dns_zone_resource_ids: Resource IDs of the DNS zones to be associated with the
+ Application Routing add-on. Used only when Application Routing add-on is enabled. Public and
+ private DNS zones can be in different resource groups, but all public DNS zones must be in the
+ same resource group and all private DNS zones must be in the same resource group.
+ :vartype dns_zone_resource_ids: list[str]
+ :ivar nginx: Configuration for the default NginxIngressController. See more at
+ https://learn.microsoft.com/en-us/azure/aks/app-routing-nginx-configuration#the-default-nginx-ingress-controller.
+ :vartype nginx: ~azure.mgmt.containerservice.models.ManagedClusterIngressProfileNginx
+ :ivar identity: Managed identity of the Application Routing add-on. This is the identity that
+ should be granted permissions, for example, to manage the associated Azure DNS resource and get
+ certificates from Azure Key Vault. See `this overview of the add-on
+ `_ for more
+ instructions.
+ :vartype identity: ~azure.mgmt.containerservice.models.UserAssignedIdentity
+ """
+
+ _validation = {
+ "dns_zone_resource_ids": {"max_items": 5, "min_items": 0},
+ "identity": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "enabled": {"key": "enabled", "type": "bool"},
+ "dns_zone_resource_ids": {"key": "dnsZoneResourceIds", "type": "[str]"},
+ "nginx": {"key": "nginx", "type": "ManagedClusterIngressProfileNginx"},
+ "identity": {"key": "identity", "type": "UserAssignedIdentity"},
+ }
+
+ def __init__(
+ self,
+ *,
+ enabled: Optional[bool] = None,
+ dns_zone_resource_ids: Optional[list[str]] = None,
+ nginx: Optional["_models.ManagedClusterIngressProfileNginx"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword enabled: Whether to enable the Application Routing add-on.
+ :paramtype enabled: bool
+ :keyword dns_zone_resource_ids: Resource IDs of the DNS zones to be associated with the
+ Application Routing add-on. Used only when Application Routing add-on is enabled. Public and
+ private DNS zones can be in different resource groups, but all public DNS zones must be in the
+ same resource group and all private DNS zones must be in the same resource group.
+ :paramtype dns_zone_resource_ids: list[str]
+ :keyword nginx: Configuration for the default NginxIngressController. See more at
+ https://learn.microsoft.com/en-us/azure/aks/app-routing-nginx-configuration#the-default-nginx-ingress-controller.
+ :paramtype nginx: ~azure.mgmt.containerservice.models.ManagedClusterIngressProfileNginx
+ """
+ super().__init__(**kwargs)
+ self.enabled = enabled
+ self.dns_zone_resource_ids = dns_zone_resource_ids
+ self.nginx = nginx
+ self.identity: Optional["_models.UserAssignedIdentity"] = None
+
+
+class ManagedClusterListResult(_serialization.Model):
+ """The response from the List Managed Clusters operation.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar value: The list of managed clusters.
+ :vartype value: list[~azure.mgmt.containerservice.models.ManagedCluster]
+ :ivar next_link: The URL to get the next set of managed cluster results.
+ :vartype next_link: str
+ """
+
+ _validation = {
+ "next_link": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "value": {"key": "value", "type": "[ManagedCluster]"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ }
+
+ def __init__(self, *, value: Optional[list["_models.ManagedCluster"]] = None, **kwargs: Any) -> None:
+ """
+ :keyword value: The list of managed clusters.
+ :paramtype value: list[~azure.mgmt.containerservice.models.ManagedCluster]
+ """
+ super().__init__(**kwargs)
+ self.value = value
+ self.next_link: Optional[str] = None
+
+
+class ManagedClusterLoadBalancerProfile(_serialization.Model):
+ """Profile of the managed cluster load balancer.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar managed_outbound_i_ps: Desired managed outbound IPs for the cluster load balancer.
+ :vartype managed_outbound_i_ps:
+ ~azure.mgmt.containerservice.models.ManagedClusterLoadBalancerProfileManagedOutboundIPs
+ :ivar outbound_ip_prefixes: Desired outbound IP Prefix resources for the cluster load balancer.
+ :vartype outbound_ip_prefixes:
+ ~azure.mgmt.containerservice.models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes
+ :ivar outbound_i_ps: Desired outbound IP resources for the cluster load balancer.
+ :vartype outbound_i_ps:
+ ~azure.mgmt.containerservice.models.ManagedClusterLoadBalancerProfileOutboundIPs
+ :ivar effective_outbound_i_ps: The effective outbound IP resources of the cluster load
+ balancer.
+ :vartype effective_outbound_i_ps: list[~azure.mgmt.containerservice.models.ResourceReference]
+ :ivar allocated_outbound_ports: The desired number of allocated SNAT ports per VM. Allowed
+ values are in the range of 0 to 64000 (inclusive). The default value is 0 which results in
+ Azure dynamically allocating ports.
+ :vartype allocated_outbound_ports: int
+ :ivar idle_timeout_in_minutes: Desired outbound flow idle timeout in minutes. Allowed values
+ are in the range of 4 to 120 (inclusive). The default value is 30 minutes.
+ :vartype idle_timeout_in_minutes: int
+ :ivar enable_multiple_standard_load_balancers: Enable multiple standard load balancers per AKS
+ cluster or not.
+ :vartype enable_multiple_standard_load_balancers: bool
+ :ivar backend_pool_type: The type of the managed inbound Load Balancer BackendPool. Known
+ values are: "NodeIPConfiguration" and "NodeIP".
+ :vartype backend_pool_type: str or ~azure.mgmt.containerservice.models.BackendPoolType
+ """
+
    # Server-enforced constraints; effective_outbound_i_ps is populated by the
    # service and is ignored when sent in a request.
    _validation = {
        "effective_outbound_i_ps": {"readonly": True},
        "allocated_outbound_ports": {"maximum": 64000, "minimum": 0},
        "idle_timeout_in_minutes": {"maximum": 120, "minimum": 4},
    }

    # Maps each Python attribute to its REST API (JSON) key and wire type.
    _attribute_map = {
        "managed_outbound_i_ps": {
            "key": "managedOutboundIPs",
            "type": "ManagedClusterLoadBalancerProfileManagedOutboundIPs",
        },
        "outbound_ip_prefixes": {
            "key": "outboundIPPrefixes",
            "type": "ManagedClusterLoadBalancerProfileOutboundIPPrefixes",
        },
        "outbound_i_ps": {"key": "outboundIPs", "type": "ManagedClusterLoadBalancerProfileOutboundIPs"},
        "effective_outbound_i_ps": {"key": "effectiveOutboundIPs", "type": "[ResourceReference]"},
        "allocated_outbound_ports": {"key": "allocatedOutboundPorts", "type": "int"},
        "idle_timeout_in_minutes": {"key": "idleTimeoutInMinutes", "type": "int"},
        "enable_multiple_standard_load_balancers": {"key": "enableMultipleStandardLoadBalancers", "type": "bool"},
        "backend_pool_type": {"key": "backendPoolType", "type": "str"},
    }
+
+ def __init__(
+ self,
+ *,
+ managed_outbound_i_ps: Optional["_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs"] = None,
+ outbound_ip_prefixes: Optional["_models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes"] = None,
+ outbound_i_ps: Optional["_models.ManagedClusterLoadBalancerProfileOutboundIPs"] = None,
+ allocated_outbound_ports: int = 0,
+ idle_timeout_in_minutes: int = 30,
+ enable_multiple_standard_load_balancers: Optional[bool] = None,
+ backend_pool_type: Union[str, "_models.BackendPoolType"] = "NodeIPConfiguration",
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword managed_outbound_i_ps: Desired managed outbound IPs for the cluster load balancer.
+ :paramtype managed_outbound_i_ps:
+ ~azure.mgmt.containerservice.models.ManagedClusterLoadBalancerProfileManagedOutboundIPs
+ :keyword outbound_ip_prefixes: Desired outbound IP Prefix resources for the cluster load
+ balancer.
+ :paramtype outbound_ip_prefixes:
+ ~azure.mgmt.containerservice.models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes
+ :keyword outbound_i_ps: Desired outbound IP resources for the cluster load balancer.
+ :paramtype outbound_i_ps:
+ ~azure.mgmt.containerservice.models.ManagedClusterLoadBalancerProfileOutboundIPs
+ :keyword allocated_outbound_ports: The desired number of allocated SNAT ports per VM. Allowed
+ values are in the range of 0 to 64000 (inclusive). The default value is 0 which results in
+ Azure dynamically allocating ports.
+ :paramtype allocated_outbound_ports: int
+ :keyword idle_timeout_in_minutes: Desired outbound flow idle timeout in minutes. Allowed values
+ are in the range of 4 to 120 (inclusive). The default value is 30 minutes.
+ :paramtype idle_timeout_in_minutes: int
+ :keyword enable_multiple_standard_load_balancers: Enable multiple standard load balancers per
+ AKS cluster or not.
+ :paramtype enable_multiple_standard_load_balancers: bool
+ :keyword backend_pool_type: The type of the managed inbound Load Balancer BackendPool. Known
+ values are: "NodeIPConfiguration" and "NodeIP".
+ :paramtype backend_pool_type: str or ~azure.mgmt.containerservice.models.BackendPoolType
+ """
+ super().__init__(**kwargs)
+ self.managed_outbound_i_ps = managed_outbound_i_ps
+ self.outbound_ip_prefixes = outbound_ip_prefixes
+ self.outbound_i_ps = outbound_i_ps
+ self.effective_outbound_i_ps: Optional[list["_models.ResourceReference"]] = None
+ self.allocated_outbound_ports = allocated_outbound_ports
+ self.idle_timeout_in_minutes = idle_timeout_in_minutes
+ self.enable_multiple_standard_load_balancers = enable_multiple_standard_load_balancers
+ self.backend_pool_type = backend_pool_type
+
+
class ManagedClusterLoadBalancerProfileManagedOutboundIPs(_serialization.Model):  # pylint: disable=name-too-long
    """Desired managed outbound IPs for the cluster load balancer.

    :ivar count: Number of IPv4 outbound IPs created/managed by Azure for the cluster load
     balancer, in the range 1 to 100 inclusive. Defaults to 1.
    :vartype count: int
    :ivar count_ipv6: Number of IPv6 outbound IPs created/managed by Azure for the cluster load
     balancer, in the range 0 to 100 inclusive (per ``_validation``). Defaults to 0 for
     single-stack and 1 for dual-stack.
    :vartype count_ipv6: int
    """

    # Range constraints the service enforces on each counter.
    _validation = {
        "count": {"maximum": 100, "minimum": 1},
        "count_ipv6": {"maximum": 100, "minimum": 0},
    }

    # Python attribute -> REST API key/type.
    _attribute_map = {
        "count": {"key": "count", "type": "int"},
        "count_ipv6": {"key": "countIPv6", "type": "int"},
    }

    def __init__(self, *, count: int = 1, count_ipv6: int = 0, **kwargs: Any) -> None:
        """
        :keyword count: Number of IPv4 outbound IPs created/managed by Azure for the cluster load
         balancer (1-100 inclusive). Defaults to 1.
        :paramtype count: int
        :keyword count_ipv6: Number of IPv6 outbound IPs created/managed by Azure for the cluster
         load balancer. Defaults to 0 for single-stack and 1 for dual-stack.
        :paramtype count_ipv6: int
        """
        super().__init__(**kwargs)
        self.count_ipv6 = count_ipv6
        self.count = count
+
+
class ManagedClusterLoadBalancerProfileOutboundIPPrefixes(_serialization.Model):  # pylint: disable=name-too-long
    """Desired outbound IP Prefix resources for the cluster load balancer.

    :ivar public_ip_prefixes: A list of public IP prefix resources.
    :vartype public_ip_prefixes: list[~azure.mgmt.containerservice.models.ResourceReference]
    """

    # Single field: serialized under the "publicIPPrefixes" JSON key.
    _attribute_map = {"public_ip_prefixes": {"key": "publicIPPrefixes", "type": "[ResourceReference]"}}

    def __init__(
        self, *, public_ip_prefixes: Optional[list["_models.ResourceReference"]] = None, **kwargs: Any
    ) -> None:
        """
        :keyword public_ip_prefixes: Public IP prefix resources the load balancer should draw
         outbound IPs from.
        :paramtype public_ip_prefixes: list[~azure.mgmt.containerservice.models.ResourceReference]
        """
        super().__init__(**kwargs)
        self.public_ip_prefixes = public_ip_prefixes
+
+
class ManagedClusterLoadBalancerProfileOutboundIPs(_serialization.Model):  # pylint: disable=name-too-long
    """Desired outbound IP resources for the cluster load balancer.

    :ivar public_i_ps: A list of public IP resources.
    :vartype public_i_ps: list[~azure.mgmt.containerservice.models.ResourceReference]
    """

    # Single field: serialized under the "publicIPs" JSON key.
    _attribute_map = {"public_i_ps": {"key": "publicIPs", "type": "[ResourceReference]"}}

    def __init__(self, *, public_i_ps: Optional[list["_models.ResourceReference"]] = None, **kwargs: Any) -> None:
        """
        :keyword public_i_ps: Public IP resources to use as outbound IPs.
        :paramtype public_i_ps: list[~azure.mgmt.containerservice.models.ResourceReference]
        """
        super().__init__(**kwargs)
        self.public_i_ps = public_i_ps
+
+
class ManagedClusterManagedOutboundIPProfile(_serialization.Model):
    """Profile of the managed outbound IP resources of the managed cluster.

    :ivar count: Number of outbound IPs created/managed by Azure, in the range 1 to 16
     inclusive. Defaults to 1.
    :vartype count: int
    """

    # The service rejects counts outside [1, 16].
    _validation = {"count": {"maximum": 16, "minimum": 1}}

    _attribute_map = {"count": {"key": "count", "type": "int"}}

    def __init__(self, *, count: int = 1, **kwargs: Any) -> None:
        """
        :keyword count: Number of outbound IPs created/managed by Azure (1-16 inclusive).
         Defaults to 1.
        :paramtype count: int
        """
        super().__init__(**kwargs)
        self.count = count
+
+
class ManagedClusterMetricsProfile(_serialization.Model):
    """The metrics profile for the ManagedCluster.

    :ivar cost_analysis: The configuration for detailed per-Kubernetes resource cost analysis.
    :vartype cost_analysis: ~azure.mgmt.containerservice.models.ManagedClusterCostAnalysis
    """

    _attribute_map = {"cost_analysis": {"key": "costAnalysis", "type": "ManagedClusterCostAnalysis"}}

    def __init__(self, *, cost_analysis: Optional["_models.ManagedClusterCostAnalysis"] = None, **kwargs: Any) -> None:
        """
        :keyword cost_analysis: Configuration for detailed per-Kubernetes resource cost analysis.
        :paramtype cost_analysis: ~azure.mgmt.containerservice.models.ManagedClusterCostAnalysis
        """
        super().__init__(**kwargs)
        self.cost_analysis = cost_analysis
+
+
class ManagedClusterNATGatewayProfile(_serialization.Model):
    """Profile of the managed cluster NAT gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar managed_outbound_ip_profile: Profile of the managed outbound IP resources of the cluster
     NAT gateway.
    :vartype managed_outbound_ip_profile:
     ~azure.mgmt.containerservice.models.ManagedClusterManagedOutboundIPProfile
    :ivar effective_outbound_i_ps: The effective outbound IP resources of the cluster NAT gateway.
    :vartype effective_outbound_i_ps: list[~azure.mgmt.containerservice.models.ResourceReference]
    :ivar idle_timeout_in_minutes: Desired outbound flow idle timeout in minutes, in the range
     4 to 120 inclusive. Defaults to 4 minutes.
    :vartype idle_timeout_in_minutes: int
    """

    # effective_outbound_i_ps is server-populated; the timeout has a service-enforced range.
    _validation = {
        "effective_outbound_i_ps": {"readonly": True},
        "idle_timeout_in_minutes": {"maximum": 120, "minimum": 4},
    }

    # Python attribute -> REST API key/type.
    _attribute_map = {
        "managed_outbound_ip_profile": {
            "key": "managedOutboundIPProfile",
            "type": "ManagedClusterManagedOutboundIPProfile",
        },
        "effective_outbound_i_ps": {"key": "effectiveOutboundIPs", "type": "[ResourceReference]"},
        "idle_timeout_in_minutes": {"key": "idleTimeoutInMinutes", "type": "int"},
    }

    def __init__(
        self,
        *,
        managed_outbound_ip_profile: Optional["_models.ManagedClusterManagedOutboundIPProfile"] = None,
        idle_timeout_in_minutes: int = 4,
        **kwargs: Any
    ) -> None:
        """
        :keyword managed_outbound_ip_profile: Profile of the managed outbound IP resources of the
         cluster NAT gateway.
        :paramtype managed_outbound_ip_profile:
         ~azure.mgmt.containerservice.models.ManagedClusterManagedOutboundIPProfile
        :keyword idle_timeout_in_minutes: Desired outbound flow idle timeout in minutes
         (4-120 inclusive). Defaults to 4 minutes.
        :paramtype idle_timeout_in_minutes: int
        """
        super().__init__(**kwargs)
        self.idle_timeout_in_minutes = idle_timeout_in_minutes
        self.managed_outbound_ip_profile = managed_outbound_ip_profile
        # Read-only: filled in by the service, never sent by the client.
        self.effective_outbound_i_ps: Optional[list["_models.ResourceReference"]] = None
+
+
class ManagedClusterNodeProvisioningProfile(_serialization.Model):
    """Node provisioning settings for the managed cluster.

    :ivar mode: The node provisioning mode. If not specified, the default is Manual. Known values
     are: "Manual" and "Auto".
    :vartype mode: str or ~azure.mgmt.containerservice.models.NodeProvisioningMode
    :ivar default_node_pools: The set of default Karpenter NodePools (CRDs) configured for node
     provisioning; has no effect unless ``mode`` is 'Auto'. Warning: changing this from Auto to
     None on an existing cluster deletes the default Karpenter NodePools, draining and deleting
     the nodes associated with those pools — only do this when idle nodes are ready to take the
     evicted pods. Defaults to Auto if not specified. See aka.ms/aks/nap#node-pools. Known values
     are: "None" and "Auto".
    :vartype default_node_pools: str or
     ~azure.mgmt.containerservice.models.NodeProvisioningDefaultNodePools
    """

    # Python attribute -> REST API key/type.
    _attribute_map = {
        "mode": {"key": "mode", "type": "str"},
        "default_node_pools": {"key": "defaultNodePools", "type": "str"},
    }

    def __init__(
        self,
        *,
        mode: Optional[Union[str, "_models.NodeProvisioningMode"]] = None,
        default_node_pools: Union[str, "_models.NodeProvisioningDefaultNodePools"] = "Auto",
        **kwargs: Any
    ) -> None:
        """
        :keyword mode: The node provisioning mode; the service default is Manual. Known values are:
         "Manual" and "Auto".
        :paramtype mode: str or ~azure.mgmt.containerservice.models.NodeProvisioningMode
        :keyword default_node_pools: The set of default Karpenter NodePools (CRDs) configured for
         node provisioning; no effect unless ``mode`` is 'Auto'. Warning: switching Auto -> None on
         an existing cluster deletes the default Karpenter NodePools and drains/deletes their
         nodes. Defaults to Auto. See aka.ms/aks/nap#node-pools. Known values are: "None" and
         "Auto".
        :paramtype default_node_pools: str or
         ~azure.mgmt.containerservice.models.NodeProvisioningDefaultNodePools
        """
        super().__init__(**kwargs)
        self.default_node_pools = default_node_pools
        self.mode = mode
+
+
class ManagedClusterNodeResourceGroupProfile(_serialization.Model):
    """Node resource group lockdown profile for a managed cluster.

    :ivar restriction_level: The restriction level applied to the cluster's node resource group;
     the service default is 'Unrestricted'. Known values are: "Unrestricted" and "ReadOnly".
    :vartype restriction_level: str or ~azure.mgmt.containerservice.models.RestrictionLevel
    """

    _attribute_map = {"restriction_level": {"key": "restrictionLevel", "type": "str"}}

    def __init__(
        self, *, restriction_level: Optional[Union[str, "_models.RestrictionLevel"]] = None, **kwargs: Any
    ) -> None:
        """
        :keyword restriction_level: Restriction level applied to the cluster's node resource
         group; the service default is 'Unrestricted'. Known values are: "Unrestricted" and
         "ReadOnly".
        :paramtype restriction_level: str or ~azure.mgmt.containerservice.models.RestrictionLevel
        """
        super().__init__(**kwargs)
        self.restriction_level = restriction_level
+
+
class ManagedClusterOIDCIssuerProfile(_serialization.Model):
    """The OIDC issuer profile of the Managed Cluster.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar issuer_url: The OIDC issuer url of the Managed Cluster.
    :vartype issuer_url: str
    :ivar enabled: Whether the OIDC issuer is enabled.
    :vartype enabled: bool
    """

    # issuer_url is assigned by the service once the issuer is enabled.
    _validation = {"issuer_url": {"readonly": True}}

    _attribute_map = {
        "issuer_url": {"key": "issuerURL", "type": "str"},
        "enabled": {"key": "enabled", "type": "bool"},
    }

    def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None:
        """
        :keyword enabled: Whether the OIDC issuer is enabled.
        :paramtype enabled: bool
        """
        super().__init__(**kwargs)
        self.enabled = enabled
        # Read-only: populated by the service.
        self.issuer_url: Optional[str] = None
+
+
class ManagedClusterPodIdentity(_serialization.Model):
    """Details about the pod identity assigned to the Managed Cluster.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to server.

    :ivar name: The name of the pod identity. Required.
    :vartype name: str
    :ivar namespace: The namespace of the pod identity. Required.
    :vartype namespace: str
    :ivar binding_selector: The binding selector to use for the AzureIdentityBinding resource.
    :vartype binding_selector: str
    :ivar identity: The user assigned identity details. Required.
    :vartype identity: ~azure.mgmt.containerservice.models.UserAssignedIdentity
    :ivar provisioning_state: The current provisioning state of the pod identity. Known values
     are: "Assigned", "Canceled", "Deleting", "Failed", "Succeeded", and "Updating".
    :vartype provisioning_state: str or
     ~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProvisioningState
    :ivar provisioning_info: Additional provisioning detail (e.g. assignment errors).
    :vartype provisioning_info:
     ~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProvisioningInfo
    """

    # name/namespace/identity must be supplied; provisioning fields come from the server.
    _validation = {
        "name": {"required": True},
        "namespace": {"required": True},
        "identity": {"required": True},
        "provisioning_state": {"readonly": True},
        "provisioning_info": {"readonly": True},
    }

    # Python attribute -> REST API key/type.
    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "namespace": {"key": "namespace", "type": "str"},
        "binding_selector": {"key": "bindingSelector", "type": "str"},
        "identity": {"key": "identity", "type": "UserAssignedIdentity"},
        "provisioning_state": {"key": "provisioningState", "type": "str"},
        "provisioning_info": {"key": "provisioningInfo", "type": "ManagedClusterPodIdentityProvisioningInfo"},
    }

    def __init__(
        self,
        *,
        name: str,
        namespace: str,
        identity: "_models.UserAssignedIdentity",
        binding_selector: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword name: The name of the pod identity. Required.
        :paramtype name: str
        :keyword namespace: The namespace of the pod identity. Required.
        :paramtype namespace: str
        :keyword binding_selector: Binding selector to use for the AzureIdentityBinding resource.
        :paramtype binding_selector: str
        :keyword identity: The user assigned identity details. Required.
        :paramtype identity: ~azure.mgmt.containerservice.models.UserAssignedIdentity
        """
        super().__init__(**kwargs)
        # Caller-supplied fields.
        self.name = name
        self.namespace = namespace
        self.identity = identity
        self.binding_selector = binding_selector
        # Read-only: populated by the service.
        self.provisioning_state: Optional[Union[str, "_models.ManagedClusterPodIdentityProvisioningState"]] = None
        self.provisioning_info: Optional["_models.ManagedClusterPodIdentityProvisioningInfo"] = None
+
+
class ManagedClusterPodIdentityException(_serialization.Model):
    """A pod identity exception, which allows pods with certain labels to access the Azure Instance
    Metadata Service (IMDS) endpoint without being intercepted by the node-managed identity (NMI)
    server. See the AAD Pod Identity documentation on disabling pod identity for a specific
    Pod/Application for more details.

    All required parameters must be populated in order to send to server.

    :ivar name: The name of the pod identity exception. Required.
    :vartype name: str
    :ivar namespace: The namespace of the pod identity exception. Required.
    :vartype namespace: str
    :ivar pod_labels: The pod labels to match. Required.
    :vartype pod_labels: dict[str, str]
    """

    # All three fields are mandatory on requests.
    _validation = {
        "name": {"required": True},
        "namespace": {"required": True},
        "pod_labels": {"required": True},
    }

    # Python attribute -> REST API key/type ({str} is a string-to-string map).
    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "namespace": {"key": "namespace", "type": "str"},
        "pod_labels": {"key": "podLabels", "type": "{str}"},
    }

    def __init__(self, *, name: str, namespace: str, pod_labels: dict[str, str], **kwargs: Any) -> None:
        """
        :keyword name: The name of the pod identity exception. Required.
        :paramtype name: str
        :keyword namespace: The namespace of the pod identity exception. Required.
        :paramtype namespace: str
        :keyword pod_labels: The pod labels to match. Required.
        :paramtype pod_labels: dict[str, str]
        """
        super().__init__(**kwargs)
        self.pod_labels = pod_labels
        self.namespace = namespace
        self.name = name
+
+
class ManagedClusterPodIdentityProfile(_serialization.Model):
    """The pod identity profile of the Managed Cluster. See the AAD pod identity documentation for
    more details on pod identity integration.

    :ivar enabled: Whether the pod identity addon is enabled.
    :vartype enabled: bool
    :ivar allow_network_plugin_kubenet: Whether pod identity is allowed to run on clusters with
     Kubenet networking. Running in Kubenet is disabled by default due to the security related
     nature of AAD Pod Identity and the risks of IP spoofing; see the AAD Pod Identity
     documentation on using the Kubenet network plugin for more information.
    :vartype allow_network_plugin_kubenet: bool
    :ivar user_assigned_identities: The pod identities to use in the cluster.
    :vartype user_assigned_identities:
     list[~azure.mgmt.containerservice.models.ManagedClusterPodIdentity]
    :ivar user_assigned_identity_exceptions: The pod identity exceptions to allow.
    :vartype user_assigned_identity_exceptions:
     list[~azure.mgmt.containerservice.models.ManagedClusterPodIdentityException]
    """

    # Python attribute -> REST API key/type.
    _attribute_map = {
        "enabled": {"key": "enabled", "type": "bool"},
        "allow_network_plugin_kubenet": {"key": "allowNetworkPluginKubenet", "type": "bool"},
        "user_assigned_identities": {"key": "userAssignedIdentities", "type": "[ManagedClusterPodIdentity]"},
        "user_assigned_identity_exceptions": {
            "key": "userAssignedIdentityExceptions",
            "type": "[ManagedClusterPodIdentityException]",
        },
    }

    def __init__(
        self,
        *,
        enabled: Optional[bool] = None,
        allow_network_plugin_kubenet: Optional[bool] = None,
        user_assigned_identities: Optional[list["_models.ManagedClusterPodIdentity"]] = None,
        user_assigned_identity_exceptions: Optional[list["_models.ManagedClusterPodIdentityException"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword enabled: Whether the pod identity addon is enabled.
        :paramtype enabled: bool
        :keyword allow_network_plugin_kubenet: Whether pod identity may run on clusters with
         Kubenet networking. Disabled by default because of the security-related nature of AAD Pod
         Identity and the risks of IP spoofing.
        :paramtype allow_network_plugin_kubenet: bool
        :keyword user_assigned_identities: The pod identities to use in the cluster.
        :paramtype user_assigned_identities:
         list[~azure.mgmt.containerservice.models.ManagedClusterPodIdentity]
        :keyword user_assigned_identity_exceptions: The pod identity exceptions to allow.
        :paramtype user_assigned_identity_exceptions:
         list[~azure.mgmt.containerservice.models.ManagedClusterPodIdentityException]
        """
        super().__init__(**kwargs)
        self.user_assigned_identity_exceptions = user_assigned_identity_exceptions
        self.user_assigned_identities = user_assigned_identities
        self.allow_network_plugin_kubenet = allow_network_plugin_kubenet
        self.enabled = enabled
+
+
class ManagedClusterPodIdentityProvisioningError(_serialization.Model):  # pylint: disable=name-too-long
    """An error response from the pod identity provisioning.

    :ivar error: Details about the error.
    :vartype error:
     ~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProvisioningErrorBody
    """

    # Wraps the error body under the "error" JSON key (ARM error envelope shape).
    _attribute_map = {"error": {"key": "error", "type": "ManagedClusterPodIdentityProvisioningErrorBody"}}

    def __init__(
        self, *, error: Optional["_models.ManagedClusterPodIdentityProvisioningErrorBody"] = None, **kwargs: Any
    ) -> None:
        """
        :keyword error: Details about the error.
        :paramtype error:
         ~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProvisioningErrorBody
        """
        super().__init__(**kwargs)
        self.error = error
+
+
class ManagedClusterPodIdentityProvisioningErrorBody(_serialization.Model):  # pylint: disable=name-too-long
    """An error response from the pod identity provisioning.

    :ivar code: An identifier for the error. Codes are invariant and are intended to be consumed
     programmatically.
    :vartype code: str
    :ivar message: A message describing the error, intended to be suitable for display in a user
     interface.
    :vartype message: str
    :ivar target: The target of the particular error, e.g. the name of the property in error.
    :vartype target: str
    :ivar details: A list of additional details about the error.
    :vartype details:
     list[~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProvisioningErrorBody]
    """

    # Python attribute -> REST API key/type; "details" is recursively this same shape.
    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "target": {"key": "target", "type": "str"},
        "details": {"key": "details", "type": "[ManagedClusterPodIdentityProvisioningErrorBody]"},
    }

    def __init__(
        self,
        *,
        code: Optional[str] = None,
        message: Optional[str] = None,
        target: Optional[str] = None,
        details: Optional[list["_models.ManagedClusterPodIdentityProvisioningErrorBody"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword code: Identifier for the error; codes are invariant and intended for programmatic
         consumption.
        :paramtype code: str
        :keyword message: Human-readable message describing the error.
        :paramtype message: str
        :keyword target: The target of the particular error, e.g. the name of the property in
         error.
        :paramtype target: str
        :keyword details: Additional (nested) details about the error.
        :paramtype details:
         list[~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProvisioningErrorBody]
        """
        super().__init__(**kwargs)
        self.details = details
        self.target = target
        self.message = message
        self.code = code
+
+
class ManagedClusterPodIdentityProvisioningInfo(_serialization.Model):  # pylint: disable=name-too-long
    """Provisioning information for a pod identity.

    :ivar error: Pod identity assignment error (if any).
    :vartype error: ~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProvisioningError
    """

    _attribute_map = {"error": {"key": "error", "type": "ManagedClusterPodIdentityProvisioningError"}}

    def __init__(
        self, *, error: Optional["_models.ManagedClusterPodIdentityProvisioningError"] = None, **kwargs: Any
    ) -> None:
        """
        :keyword error: Pod identity assignment error (if any).
        :paramtype error:
         ~azure.mgmt.containerservice.models.ManagedClusterPodIdentityProvisioningError
        """
        super().__init__(**kwargs)
        self.error = error
+
+
class ManagedClusterPoolUpgradeProfile(_serialization.Model):
    """The list of available upgrade versions.

    All required parameters must be populated in order to send to server.

    :ivar kubernetes_version: The Kubernetes version (major.minor.patch). Required.
    :vartype kubernetes_version: str
    :ivar name: The Agent Pool name.
    :vartype name: str
    :ivar os_type: The operating system type; the default is Linux. Known values are: "Linux" and
     "Windows".
    :vartype os_type: str or ~azure.mgmt.containerservice.models.OSType
    :ivar upgrades: List of orchestrator types and versions available for upgrade.
    :vartype upgrades:
     list[~azure.mgmt.containerservice.models.ManagedClusterPoolUpgradeProfileUpgradesItem]
    """

    # kubernetes_version and os_type must always be present on the wire.
    _validation = {
        "kubernetes_version": {"required": True},
        "os_type": {"required": True},
    }

    # Python attribute -> REST API key/type.
    _attribute_map = {
        "kubernetes_version": {"key": "kubernetesVersion", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "os_type": {"key": "osType", "type": "str"},
        "upgrades": {"key": "upgrades", "type": "[ManagedClusterPoolUpgradeProfileUpgradesItem]"},
    }

    def __init__(
        self,
        *,
        kubernetes_version: str,
        os_type: Union[str, "_models.OSType"] = "Linux",
        name: Optional[str] = None,
        upgrades: Optional[list["_models.ManagedClusterPoolUpgradeProfileUpgradesItem"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword kubernetes_version: The Kubernetes version (major.minor.patch). Required.
        :paramtype kubernetes_version: str
        :keyword name: The Agent Pool name.
        :paramtype name: str
        :keyword os_type: The operating system type; defaults to Linux. Known values are: "Linux"
         and "Windows".
        :paramtype os_type: str or ~azure.mgmt.containerservice.models.OSType
        :keyword upgrades: List of orchestrator types and versions available for upgrade.
        :paramtype upgrades:
         list[~azure.mgmt.containerservice.models.ManagedClusterPoolUpgradeProfileUpgradesItem]
        """
        super().__init__(**kwargs)
        self.upgrades = upgrades
        self.os_type = os_type
        self.name = name
        self.kubernetes_version = kubernetes_version
+
+
class ManagedClusterPoolUpgradeProfileUpgradesItem(_serialization.Model):  # pylint: disable=name-too-long
    """A single available upgrade target for an agent pool.

    :ivar kubernetes_version: The Kubernetes version (major.minor.patch).
    :vartype kubernetes_version: str
    :ivar is_preview: Whether the Kubernetes version is currently in preview.
    :vartype is_preview: bool
    """

    _attribute_map = {
        "kubernetes_version": {"key": "kubernetesVersion", "type": "str"},
        "is_preview": {"key": "isPreview", "type": "bool"},
    }

    def __init__(
        self, *, kubernetes_version: Optional[str] = None, is_preview: Optional[bool] = None, **kwargs: Any
    ) -> None:
        """
        :keyword kubernetes_version: The Kubernetes version (major.minor.patch).
        :paramtype kubernetes_version: str
        :keyword is_preview: Whether the Kubernetes version is currently in preview.
        :paramtype is_preview: bool
        """
        super().__init__(**kwargs)
        self.is_preview = is_preview
        self.kubernetes_version = kubernetes_version
+
+
+class ManagedClusterPropertiesAutoScalerProfile(_serialization.Model): # pylint: disable=name-too-long
+ """Parameters to be applied to the cluster-autoscaler when enabled.
+
+ :ivar balance_similar_node_groups: Detects similar node pools and balances the number of nodes
+ between them. Valid values are 'true' and 'false'.
+ :vartype balance_similar_node_groups: str
+ :ivar daemonset_eviction_for_empty_nodes: DaemonSet pods will be gracefully terminated from
+ empty nodes. If set to true, all daemonset pods on empty nodes will be evicted before deletion
+ of the node. If the daemonset pod cannot be evicted another node will be chosen for scaling. If
+ set to false, the node will be deleted without ensuring that daemonset pods are deleted or
+ evicted.
+ :vartype daemonset_eviction_for_empty_nodes: bool
+ :ivar daemonset_eviction_for_occupied_nodes: DaemonSet pods will be gracefully terminated from
+ non-empty nodes. If set to true, all daemonset pods on occupied nodes will be evicted before
+ deletion of the node. If the daemonset pod cannot be evicted another node will be chosen for
+ scaling. If set to false, the node will be deleted without ensuring that daemonset pods are
+ deleted or evicted.
+ :vartype daemonset_eviction_for_occupied_nodes: bool
+ :ivar ignore_daemonsets_utilization: Should CA ignore DaemonSet pods when calculating resource
+ utilization for scaling down. If set to true, the resources used by daemonset will be taken
+ into account when making scaling down decisions.
+ :vartype ignore_daemonsets_utilization: bool
+ :ivar expander: The expander to use when scaling up. If not specified, the default is 'random'.
+ See `expanders
+ `_
+ for more information. Known values are: "least-waste", "most-pods", "priority", and "random".
+ :vartype expander: str or ~azure.mgmt.containerservice.models.Expander
+ :ivar max_empty_bulk_delete: The maximum number of empty nodes that can be deleted at the same
+ time. This must be a positive integer. The default is 10.
+ :vartype max_empty_bulk_delete: str
+ :ivar max_graceful_termination_sec: The maximum number of seconds the cluster autoscaler waits
+ for pod termination when trying to scale down a node. The default is 600.
+ :vartype max_graceful_termination_sec: str
+ :ivar max_node_provision_time: The maximum time the autoscaler waits for a node to be
+ provisioned. The default is '15m'. Values must be an integer followed by an 'm'. No unit of
+ time other than minutes (m) is supported.
+ :vartype max_node_provision_time: str
+ :ivar max_total_unready_percentage: The maximum percentage of unready nodes in the cluster.
+ After this percentage is exceeded, cluster autoscaler halts operations. The default is 45. The
+ maximum is 100 and the minimum is 0.
+ :vartype max_total_unready_percentage: str
+ :ivar new_pod_scale_up_delay: Ignore unscheduled pods before they're a certain age. For
+ scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler
+ could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a
+ certain age. The default is '0s'. Values must be an integer followed by a unit ('s' for
+ seconds, 'm' for minutes, 'h' for hours, etc).
+ :vartype new_pod_scale_up_delay: str
+ :ivar ok_total_unready_count: The number of allowed unready nodes, irrespective of
+ max-total-unready-percentage. This must be an integer. The default is 3.
+ :vartype ok_total_unready_count: str
+ :ivar scan_interval: How often cluster is reevaluated for scale up or down. The default is
+ '10'. Values must be an integer number of seconds.
+ :vartype scan_interval: str
+ :ivar scale_down_delay_after_add: How long after scale up that scale down evaluation resumes.
+ The default is '10m'. Values must be an integer followed by an 'm'. No unit of time other than
+ minutes (m) is supported.
+ :vartype scale_down_delay_after_add: str
+ :ivar scale_down_delay_after_delete: How long after node deletion that scale down evaluation
+ resumes. The default is the scan-interval. Values must be an integer followed by an 'm'. No
+ unit of time other than minutes (m) is supported.
+ :vartype scale_down_delay_after_delete: str
+ :ivar scale_down_delay_after_failure: How long after scale down failure that scale down
+ evaluation resumes. The default is '3m'. Values must be an integer followed by an 'm'. No unit
+ of time other than minutes (m) is supported.
+ :vartype scale_down_delay_after_failure: str
+ :ivar scale_down_unneeded_time: How long a node should be unneeded before it is eligible for
+ scale down. The default is '10m'. Values must be an integer followed by an 'm'. No unit of time
+ other than minutes (m) is supported.
+ :vartype scale_down_unneeded_time: str
+ :ivar scale_down_unready_time: How long an unready node should be unneeded before it is
+ eligible for scale down. The default is '20m'. Values must be an integer followed by an 'm'. No
+ unit of time other than minutes (m) is supported.
+ :vartype scale_down_unready_time: str
+ :ivar scale_down_utilization_threshold: Node utilization level, defined as sum of requested
+ resources divided by capacity, below which a node can be considered for scale down. The default
+ is '0.5'.
+ :vartype scale_down_utilization_threshold: str
+ :ivar skip_nodes_with_local_storage: If cluster autoscaler will skip deleting nodes with pods
+ with local storage, for example, EmptyDir or HostPath. The default is true.
+ :vartype skip_nodes_with_local_storage: str
+ :ivar skip_nodes_with_system_pods: If cluster autoscaler will skip deleting nodes with pods
+ from kube-system (except for DaemonSet or mirror pods). The default is true.
+ :vartype skip_nodes_with_system_pods: str
+ """
+
+ _attribute_map = {
+ "balance_similar_node_groups": {"key": "balance-similar-node-groups", "type": "str"},
+ "daemonset_eviction_for_empty_nodes": {"key": "daemonset-eviction-for-empty-nodes", "type": "bool"},
+ "daemonset_eviction_for_occupied_nodes": {"key": "daemonset-eviction-for-occupied-nodes", "type": "bool"},
+ "ignore_daemonsets_utilization": {"key": "ignore-daemonsets-utilization", "type": "bool"},
+ "expander": {"key": "expander", "type": "str"},
+ "max_empty_bulk_delete": {"key": "max-empty-bulk-delete", "type": "str"},
+ "max_graceful_termination_sec": {"key": "max-graceful-termination-sec", "type": "str"},
+ "max_node_provision_time": {"key": "max-node-provision-time", "type": "str"},
+ "max_total_unready_percentage": {"key": "max-total-unready-percentage", "type": "str"},
+ "new_pod_scale_up_delay": {"key": "new-pod-scale-up-delay", "type": "str"},
+ "ok_total_unready_count": {"key": "ok-total-unready-count", "type": "str"},
+ "scan_interval": {"key": "scan-interval", "type": "str"},
+ "scale_down_delay_after_add": {"key": "scale-down-delay-after-add", "type": "str"},
+ "scale_down_delay_after_delete": {"key": "scale-down-delay-after-delete", "type": "str"},
+ "scale_down_delay_after_failure": {"key": "scale-down-delay-after-failure", "type": "str"},
+ "scale_down_unneeded_time": {"key": "scale-down-unneeded-time", "type": "str"},
+ "scale_down_unready_time": {"key": "scale-down-unready-time", "type": "str"},
+ "scale_down_utilization_threshold": {"key": "scale-down-utilization-threshold", "type": "str"},
+ "skip_nodes_with_local_storage": {"key": "skip-nodes-with-local-storage", "type": "str"},
+ "skip_nodes_with_system_pods": {"key": "skip-nodes-with-system-pods", "type": "str"},
+ }
+
    def __init__(
        self,
        *,
        balance_similar_node_groups: Optional[str] = None,
        daemonset_eviction_for_empty_nodes: Optional[bool] = None,
        daemonset_eviction_for_occupied_nodes: Optional[bool] = None,
        ignore_daemonsets_utilization: Optional[bool] = None,
        expander: Optional[Union[str, "_models.Expander"]] = None,
        max_empty_bulk_delete: Optional[str] = None,
        max_graceful_termination_sec: Optional[str] = None,
        max_node_provision_time: Optional[str] = None,
        max_total_unready_percentage: Optional[str] = None,
        new_pod_scale_up_delay: Optional[str] = None,
        ok_total_unready_count: Optional[str] = None,
        scan_interval: Optional[str] = None,
        scale_down_delay_after_add: Optional[str] = None,
        scale_down_delay_after_delete: Optional[str] = None,
        scale_down_delay_after_failure: Optional[str] = None,
        scale_down_unneeded_time: Optional[str] = None,
        scale_down_unready_time: Optional[str] = None,
        scale_down_utilization_threshold: Optional[str] = None,
        skip_nodes_with_local_storage: Optional[str] = None,
        skip_nodes_with_system_pods: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword balance_similar_node_groups: Detects similar node pools and balances the number of
         nodes between them. Valid values are 'true' and 'false'.
        :paramtype balance_similar_node_groups: str
        :keyword daemonset_eviction_for_empty_nodes: DaemonSet pods will be gracefully terminated
         from empty nodes. If set to true, all daemonset pods on empty nodes will be evicted before
         deletion of the node. If the daemonset pod cannot be evicted another node will be chosen
         for scaling. If set to false, the node will be deleted without ensuring that daemonset
         pods are deleted or evicted.
        :paramtype daemonset_eviction_for_empty_nodes: bool
        :keyword daemonset_eviction_for_occupied_nodes: DaemonSet pods will be gracefully
         terminated from non-empty nodes. If set to true, all daemonset pods on occupied nodes will
         be evicted before deletion of the node. If the daemonset pod cannot be evicted another
         node will be chosen for scaling. If set to false, the node will be deleted without
         ensuring that daemonset pods are deleted or evicted.
        :paramtype daemonset_eviction_for_occupied_nodes: bool
        :keyword ignore_daemonsets_utilization: Should CA ignore DaemonSet pods when calculating
         resource utilization for scaling down. If set to true, the resources used by daemonset
         will be taken into account when making scaling down decisions.
        :paramtype ignore_daemonsets_utilization: bool
        :keyword expander: The expander to use when scaling up. If not specified, the default is
         'random'. See `expanders
         `_
         for more information. Known values are: "least-waste", "most-pods", "priority", and
         "random".
        :paramtype expander: str or ~azure.mgmt.containerservice.models.Expander
        :keyword max_empty_bulk_delete: The maximum number of empty nodes that can be deleted at
         the same time. This must be a positive integer. The default is 10.
        :paramtype max_empty_bulk_delete: str
        :keyword max_graceful_termination_sec: The maximum number of seconds the cluster autoscaler
         waits for pod termination when trying to scale down a node. The default is 600.
        :paramtype max_graceful_termination_sec: str
        :keyword max_node_provision_time: The maximum time the autoscaler waits for a node to be
         provisioned. The default is '15m'. Values must be an integer followed by an 'm'. No unit
         of time other than minutes (m) is supported.
        :paramtype max_node_provision_time: str
        :keyword max_total_unready_percentage: The maximum percentage of unready nodes in the
         cluster. After this percentage is exceeded, cluster autoscaler halts operations. The
         default is 45. The maximum is 100 and the minimum is 0.
        :paramtype max_total_unready_percentage: str
        :keyword new_pod_scale_up_delay: Ignore unscheduled pods before they're a certain age. For
         scenarios like burst/batch scale where you don't want CA to act before the kubernetes
         scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before
         they're a certain age. The default is '0s'. Values must be an integer followed by a unit
         ('s' for seconds, 'm' for minutes, 'h' for hours, etc).
        :paramtype new_pod_scale_up_delay: str
        :keyword ok_total_unready_count: The number of allowed unready nodes, irrespective of
         max-total-unready-percentage. This must be an integer. The default is 3.
        :paramtype ok_total_unready_count: str
        :keyword scan_interval: How often cluster is reevaluated for scale up or down. The default
         is '10'. Values must be an integer number of seconds.
        :paramtype scan_interval: str
        :keyword scale_down_delay_after_add: How long after scale up that scale down evaluation
         resumes. The default is '10m'. Values must be an integer followed by an 'm'. No unit of
         time other than minutes (m) is supported.
        :paramtype scale_down_delay_after_add: str
        :keyword scale_down_delay_after_delete: How long after node deletion that scale down
         evaluation resumes. The default is the scan-interval. Values must be an integer followed
         by an 'm'. No unit of time other than minutes (m) is supported.
        :paramtype scale_down_delay_after_delete: str
        :keyword scale_down_delay_after_failure: How long after scale down failure that scale down
         evaluation resumes. The default is '3m'. Values must be an integer followed by an 'm'. No
         unit of time other than minutes (m) is supported.
        :paramtype scale_down_delay_after_failure: str
        :keyword scale_down_unneeded_time: How long a node should be unneeded before it is eligible
         for scale down. The default is '10m'. Values must be an integer followed by an 'm'. No
         unit of time other than minutes (m) is supported.
        :paramtype scale_down_unneeded_time: str
        :keyword scale_down_unready_time: How long an unready node should be unneeded before it is
         eligible for scale down. The default is '20m'. Values must be an integer followed by an
         'm'. No unit of time other than minutes (m) is supported.
        :paramtype scale_down_unready_time: str
        :keyword scale_down_utilization_threshold: Node utilization level, defined as sum of
         requested resources divided by capacity, below which a node can be considered for scale
         down. The default is '0.5'.
        :paramtype scale_down_utilization_threshold: str
        :keyword skip_nodes_with_local_storage: If cluster autoscaler will skip deleting nodes with
         pods with local storage, for example, EmptyDir or HostPath. The default is true.
        :paramtype skip_nodes_with_local_storage: str
        :keyword skip_nodes_with_system_pods: If cluster autoscaler will skip deleting nodes with
         pods from kube-system (except for DaemonSet or mirror pods). The default is true.
        :paramtype skip_nodes_with_system_pods: str
        """
        super().__init__(**kwargs)
        # Plain passthrough constructor: every setting is optional and stored as given.
        # Most numeric/duration settings are deliberately typed str; they are serialized
        # under the dash-separated wire keys declared in the class-level _attribute_map.
        self.balance_similar_node_groups = balance_similar_node_groups
        self.daemonset_eviction_for_empty_nodes = daemonset_eviction_for_empty_nodes
        self.daemonset_eviction_for_occupied_nodes = daemonset_eviction_for_occupied_nodes
        self.ignore_daemonsets_utilization = ignore_daemonsets_utilization
        self.expander = expander
        self.max_empty_bulk_delete = max_empty_bulk_delete
        self.max_graceful_termination_sec = max_graceful_termination_sec
        self.max_node_provision_time = max_node_provision_time
        self.max_total_unready_percentage = max_total_unready_percentage
        self.new_pod_scale_up_delay = new_pod_scale_up_delay
        self.ok_total_unready_count = ok_total_unready_count
        self.scan_interval = scan_interval
        self.scale_down_delay_after_add = scale_down_delay_after_add
        self.scale_down_delay_after_delete = scale_down_delay_after_delete
        self.scale_down_delay_after_failure = scale_down_delay_after_failure
        self.scale_down_unneeded_time = scale_down_unneeded_time
        self.scale_down_unready_time = scale_down_unready_time
        self.scale_down_utilization_threshold = scale_down_utilization_threshold
        self.skip_nodes_with_local_storage = skip_nodes_with_local_storage
        self.skip_nodes_with_system_pods = skip_nodes_with_system_pods
+
+
class ManagedClusterSecurityProfile(_serialization.Model):
    """Security profile for the container service cluster.

    :ivar defender: Microsoft Defender settings for the security profile.
    :vartype defender: ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfileDefender
    :ivar azure_key_vault_kms: Azure Key Vault `key management service
     `_ settings for the security
     profile.
    :vartype azure_key_vault_kms: ~azure.mgmt.containerservice.models.AzureKeyVaultKms
    :ivar workload_identity: Workload identity settings for the security profile. Workload identity
     enables Kubernetes applications to access Azure cloud resources securely with Azure AD. See
     https://aka.ms/aks/wi for more details.
    :vartype workload_identity:
     ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfileWorkloadIdentity
    :ivar image_cleaner: Image Cleaner settings for the security profile.
    :vartype image_cleaner:
     ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfileImageCleaner
    :ivar custom_ca_trust_certificates: A list of up to 10 base64 encoded CAs that will be added to
     the trust store on all nodes in the cluster. For more information see `Custom CA Trust
     Certificates `_.
    :vartype custom_ca_trust_certificates: list[bytes]
    """

    # Client-side cap declared for validation: at most 10 certificates.
    _validation = {
        "custom_ca_trust_certificates": {"max_items": 10, "min_items": 0},
    }

    _attribute_map = {
        "defender": {"key": "defender", "type": "ManagedClusterSecurityProfileDefender"},
        "azure_key_vault_kms": {"key": "azureKeyVaultKms", "type": "AzureKeyVaultKms"},
        "workload_identity": {"key": "workloadIdentity", "type": "ManagedClusterSecurityProfileWorkloadIdentity"},
        "image_cleaner": {"key": "imageCleaner", "type": "ManagedClusterSecurityProfileImageCleaner"},
        # Annotated list[bytes] on the Python side; "[bytearray]" is the declared wire type.
        "custom_ca_trust_certificates": {"key": "customCATrustCertificates", "type": "[bytearray]"},
    }

    def __init__(
        self,
        *,
        defender: Optional["_models.ManagedClusterSecurityProfileDefender"] = None,
        azure_key_vault_kms: Optional["_models.AzureKeyVaultKms"] = None,
        workload_identity: Optional["_models.ManagedClusterSecurityProfileWorkloadIdentity"] = None,
        image_cleaner: Optional["_models.ManagedClusterSecurityProfileImageCleaner"] = None,
        custom_ca_trust_certificates: Optional[list[bytes]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword defender: Microsoft Defender settings for the security profile.
        :paramtype defender: ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfileDefender
        :keyword azure_key_vault_kms: Azure Key Vault `key management service
         `_ settings for the security
         profile.
        :paramtype azure_key_vault_kms: ~azure.mgmt.containerservice.models.AzureKeyVaultKms
        :keyword workload_identity: Workload identity settings for the security profile. Workload
         identity enables Kubernetes applications to access Azure cloud resources securely with
         Azure AD. See https://aka.ms/aks/wi for more details.
        :paramtype workload_identity:
         ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfileWorkloadIdentity
        :keyword image_cleaner: Image Cleaner settings for the security profile.
        :paramtype image_cleaner:
         ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfileImageCleaner
        :keyword custom_ca_trust_certificates: A list of up to 10 base64 encoded CAs that will be
         added to the trust store on all nodes in the cluster. For more information see `Custom CA
         Trust Certificates `_.
        :paramtype custom_ca_trust_certificates: list[bytes]
        """
        super().__init__(**kwargs)
        self.defender = defender
        self.azure_key_vault_kms = azure_key_vault_kms
        self.workload_identity = workload_identity
        self.image_cleaner = image_cleaner
        self.custom_ca_trust_certificates = custom_ca_trust_certificates
+
+
class ManagedClusterSecurityProfileDefender(_serialization.Model):
    """Microsoft Defender settings for the security profile.

    :ivar log_analytics_workspace_resource_id: Resource ID of the Log Analytics workspace to be
     associated with Microsoft Defender. When Microsoft Defender is enabled, this field is required
     and must be a valid workspace resource ID. When Microsoft Defender is disabled, leave the
     field empty.
    :vartype log_analytics_workspace_resource_id: str
    :ivar security_monitoring: Microsoft Defender threat detection for Cloud settings for the
     security profile.
    :vartype security_monitoring:
     ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfileDefenderSecurityMonitoring
    """

    _attribute_map = {
        "log_analytics_workspace_resource_id": {"key": "logAnalyticsWorkspaceResourceId", "type": "str"},
        "security_monitoring": {
            "key": "securityMonitoring",
            "type": "ManagedClusterSecurityProfileDefenderSecurityMonitoring",
        },
    }

    def __init__(
        self,
        *,
        log_analytics_workspace_resource_id: Optional[str] = None,
        security_monitoring: Optional["_models.ManagedClusterSecurityProfileDefenderSecurityMonitoring"] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword log_analytics_workspace_resource_id: Resource ID of the Log Analytics workspace to
         be associated with Microsoft Defender. When Microsoft Defender is enabled, this field is
         required and must be a valid workspace resource ID. When Microsoft Defender is disabled,
         leave the field empty.
        :paramtype log_analytics_workspace_resource_id: str
        :keyword security_monitoring: Microsoft Defender threat detection for Cloud settings for
         the security profile.
        :paramtype security_monitoring:
         ~azure.mgmt.containerservice.models.ManagedClusterSecurityProfileDefenderSecurityMonitoring
        """
        super().__init__(**kwargs)
        # NOTE(review): the "required when Defender is enabled" rule above is a
        # service-side constraint; nothing enforces it in this client model.
        self.log_analytics_workspace_resource_id = log_analytics_workspace_resource_id
        self.security_monitoring = security_monitoring
+
+
class ManagedClusterSecurityProfileDefenderSecurityMonitoring(_serialization.Model):  # pylint: disable=name-too-long
    """Microsoft Defender settings for the security profile threat detection.

    :ivar enabled: Whether Defender threat detection is turned on.
    :vartype enabled: bool
    """

    _attribute_map = {"enabled": {"key": "enabled", "type": "bool"}}

    def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None:
        """
        :keyword enabled: Whether Defender threat detection is turned on.
        :paramtype enabled: bool
        """
        super().__init__(**kwargs)
        self.enabled = enabled
+
+
class ManagedClusterSecurityProfileImageCleaner(_serialization.Model):  # pylint: disable=name-too-long
    """Settings for Image Cleaner, which removes unused images from nodes to free disk space and
    help reduce the attack surface area.

    :ivar enabled: Whether Image Cleaner runs on the AKS cluster.
    :vartype enabled: bool
    :ivar interval_hours: Scanning interval for Image Cleaner, in hours.
    :vartype interval_hours: int
    """

    _attribute_map = {
        "enabled": {"key": "enabled", "type": "bool"},
        "interval_hours": {"key": "intervalHours", "type": "int"},
    }

    def __init__(self, *, enabled: Optional[bool] = None, interval_hours: Optional[int] = None, **kwargs: Any) -> None:
        """
        :keyword enabled: Whether Image Cleaner runs on the AKS cluster.
        :paramtype enabled: bool
        :keyword interval_hours: Scanning interval for Image Cleaner, in hours.
        :paramtype interval_hours: int
        """
        super().__init__(**kwargs)
        # Independent assignments; order does not matter.
        self.interval_hours = interval_hours
        self.enabled = enabled
+
+
class ManagedClusterSecurityProfileWorkloadIdentity(_serialization.Model):  # pylint: disable=name-too-long
    """Workload identity settings for the security profile.

    :ivar enabled: Whether workload identity is enabled.
    :vartype enabled: bool
    """

    _attribute_map = {"enabled": {"key": "enabled", "type": "bool"}}

    def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None:
        """
        :keyword enabled: Whether workload identity is enabled.
        :paramtype enabled: bool
        """
        super().__init__(**kwargs)
        self.enabled = enabled
+
+
class ManagedClusterServicePrincipalProfile(_serialization.Model):
    """Information about a service principal identity for the cluster to use for manipulating Azure
    APIs.

    All required parameters must be populated in order to send to server.

    :ivar client_id: The ID for the service principal. Required.
    :vartype client_id: str
    :ivar secret: The secret password associated with the service principal in plain text.
    :vartype secret: str
    """

    # client_id is the only field required client-side; secret stays optional.
    _validation = {
        "client_id": {"required": True},
    }

    _attribute_map = {
        "client_id": {"key": "clientId", "type": "str"},
        "secret": {"key": "secret", "type": "str"},
    }

    def __init__(self, *, client_id: str, secret: Optional[str] = None, **kwargs: Any) -> None:
        """
        :keyword client_id: The ID for the service principal. Required.
        :paramtype client_id: str
        :keyword secret: The secret password associated with the service principal in plain text.
        :paramtype secret: str
        """
        super().__init__(**kwargs)
        self.client_id = client_id
        # NOTE(review): held in plain text per the docstring — avoid logging this model.
        self.secret = secret
+
+
class ManagedClusterSKU(_serialization.Model):
    """The SKU of a Managed Cluster.

    :ivar name: The name of a managed cluster SKU. Known values are: "Base" and "Automatic".
    :vartype name: str or ~azure.mgmt.containerservice.models.ManagedClusterSKUName
    :ivar tier: The tier of a managed cluster SKU. If not specified, the default is 'Free'. See
     `AKS Pricing Tier `_ for
     more details. Known values are: "Premium", "Standard", and "Free".
    :vartype tier: str or ~azure.mgmt.containerservice.models.ManagedClusterSKUTier
    """

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "tier": {"key": "tier", "type": "str"},
    }

    def __init__(
        self,
        *,
        name: Optional[Union[str, "_models.ManagedClusterSKUName"]] = None,
        tier: Optional[Union[str, "_models.ManagedClusterSKUTier"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword name: The name of a managed cluster SKU. Known values are: "Base" and "Automatic".
        :paramtype name: str or ~azure.mgmt.containerservice.models.ManagedClusterSKUName
        :keyword tier: The tier of a managed cluster SKU. If not specified, the default is 'Free'.
         See `AKS Pricing Tier `_
         for more details. Known values are: "Premium", "Standard", and "Free".
        :paramtype tier: str or ~azure.mgmt.containerservice.models.ManagedClusterSKUTier
        """
        super().__init__(**kwargs)
        # Both fields accept either enum members or raw strings; serialized as plain "str".
        self.name = name
        self.tier = tier
+
+
class ManagedClusterStaticEgressGatewayProfile(_serialization.Model):
    """Configuration of the Static Egress Gateway addon for the cluster.

    :ivar enabled: Whether the Static Egress Gateway addon is enabled on the cluster.
    :vartype enabled: bool
    """

    _attribute_map = {"enabled": {"key": "enabled", "type": "bool"}}

    def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None:
        """
        :keyword enabled: Whether the Static Egress Gateway addon is enabled on the cluster.
        :paramtype enabled: bool
        """
        super().__init__(**kwargs)
        self.enabled = enabled
+
+
class ManagedClusterStatus(_serialization.Model):
    """Contains read-only information about the Managed Cluster.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar provisioning_error: The error details information of the managed cluster. Preserves the
     detailed info of failure. If there was no error, this field is omitted.
    :vartype provisioning_error: ~azure.mgmt.containerservice.models.ErrorDetail
    """

    _validation = {
        "provisioning_error": {"readonly": True},
    }

    _attribute_map = {
        "provisioning_error": {"key": "provisioningError", "type": "ErrorDetail"},
    }

    def __init__(self, **kwargs: Any) -> None:
        """ """
        super().__init__(**kwargs)
        # Read-only (see _validation): populated by the server on deserialization,
        # hence no constructor parameter and a default of None.
        self.provisioning_error: Optional["_models.ErrorDetail"] = None
+
+
class ManagedClusterStorageProfile(_serialization.Model):
    """Storage profile for the container service cluster.

    :ivar disk_csi_driver: AzureDisk CSI Driver settings for the storage profile.
    :vartype disk_csi_driver:
     ~azure.mgmt.containerservice.models.ManagedClusterStorageProfileDiskCSIDriver
    :ivar file_csi_driver: AzureFile CSI Driver settings for the storage profile.
    :vartype file_csi_driver:
     ~azure.mgmt.containerservice.models.ManagedClusterStorageProfileFileCSIDriver
    :ivar snapshot_controller: Snapshot Controller settings for the storage profile.
    :vartype snapshot_controller:
     ~azure.mgmt.containerservice.models.ManagedClusterStorageProfileSnapshotController
    :ivar blob_csi_driver: AzureBlob CSI Driver settings for the storage profile.
    :vartype blob_csi_driver:
     ~azure.mgmt.containerservice.models.ManagedClusterStorageProfileBlobCSIDriver
    """

    _attribute_map = {
        "disk_csi_driver": {"key": "diskCSIDriver", "type": "ManagedClusterStorageProfileDiskCSIDriver"},
        "file_csi_driver": {"key": "fileCSIDriver", "type": "ManagedClusterStorageProfileFileCSIDriver"},
        "snapshot_controller": {"key": "snapshotController", "type": "ManagedClusterStorageProfileSnapshotController"},
        "blob_csi_driver": {"key": "blobCSIDriver", "type": "ManagedClusterStorageProfileBlobCSIDriver"},
    }

    def __init__(
        self,
        *,
        disk_csi_driver: Optional["_models.ManagedClusterStorageProfileDiskCSIDriver"] = None,
        file_csi_driver: Optional["_models.ManagedClusterStorageProfileFileCSIDriver"] = None,
        snapshot_controller: Optional["_models.ManagedClusterStorageProfileSnapshotController"] = None,
        blob_csi_driver: Optional["_models.ManagedClusterStorageProfileBlobCSIDriver"] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword disk_csi_driver: AzureDisk CSI Driver settings for the storage profile.
        :paramtype disk_csi_driver:
         ~azure.mgmt.containerservice.models.ManagedClusterStorageProfileDiskCSIDriver
        :keyword file_csi_driver: AzureFile CSI Driver settings for the storage profile.
        :paramtype file_csi_driver:
         ~azure.mgmt.containerservice.models.ManagedClusterStorageProfileFileCSIDriver
        :keyword snapshot_controller: Snapshot Controller settings for the storage profile.
        :paramtype snapshot_controller:
         ~azure.mgmt.containerservice.models.ManagedClusterStorageProfileSnapshotController
        :keyword blob_csi_driver: AzureBlob CSI Driver settings for the storage profile.
        :paramtype blob_csi_driver:
         ~azure.mgmt.containerservice.models.ManagedClusterStorageProfileBlobCSIDriver
        """
        super().__init__(**kwargs)
        # Each sub-profile is an optional, independent enable-toggle model.
        self.disk_csi_driver = disk_csi_driver
        self.file_csi_driver = file_csi_driver
        self.snapshot_controller = snapshot_controller
        self.blob_csi_driver = blob_csi_driver
+
+
class ManagedClusterStorageProfileBlobCSIDriver(_serialization.Model):  # pylint: disable=name-too-long
    """AzureBlob CSI Driver settings for the storage profile.

    :ivar enabled: Whether the AzureBlob CSI Driver is enabled. The default value is false.
    :vartype enabled: bool
    """

    _attribute_map = {"enabled": {"key": "enabled", "type": "bool"}}

    def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None:
        """
        :keyword enabled: Whether the AzureBlob CSI Driver is enabled. The default value is false.
        :paramtype enabled: bool
        """
        super().__init__(**kwargs)
        self.enabled = enabled
+
+
class ManagedClusterStorageProfileDiskCSIDriver(_serialization.Model):  # pylint: disable=name-too-long
    """AzureDisk CSI Driver settings for the storage profile.

    :ivar enabled: Whether the AzureDisk CSI Driver is enabled. The default value is true.
    :vartype enabled: bool
    """

    _attribute_map = {"enabled": {"key": "enabled", "type": "bool"}}

    def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None:
        """
        :keyword enabled: Whether the AzureDisk CSI Driver is enabled. The default value is true.
        :paramtype enabled: bool
        """
        super().__init__(**kwargs)
        self.enabled = enabled
+
+
class ManagedClusterStorageProfileFileCSIDriver(_serialization.Model):  # pylint: disable=name-too-long
    """AzureFile CSI Driver settings for the storage profile.

    :ivar enabled: Whether the AzureFile CSI Driver is enabled. The default value is true.
    :vartype enabled: bool
    """

    _attribute_map = {"enabled": {"key": "enabled", "type": "bool"}}

    def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None:
        """
        :keyword enabled: Whether the AzureFile CSI Driver is enabled. The default value is true.
        :paramtype enabled: bool
        """
        super().__init__(**kwargs)
        self.enabled = enabled
+
+
class ManagedClusterStorageProfileSnapshotController(_serialization.Model):  # pylint: disable=name-too-long
    """Snapshot Controller settings for the storage profile.

    :ivar enabled: Whether the Snapshot Controller is enabled. The default value is true.
    :vartype enabled: bool
    """

    _attribute_map = {"enabled": {"key": "enabled", "type": "bool"}}

    def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None:
        """
        :keyword enabled: Whether the Snapshot Controller is enabled. The default value is true.
        :paramtype enabled: bool
        """
        super().__init__(**kwargs)
        self.enabled = enabled
+
+
class ManagedClusterUpgradeProfile(_serialization.Model):
    """The list of available upgrades for compute pools.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to server.

    :ivar id: The ID of the upgrade profile.
    :vartype id: str
    :ivar name: The name of the upgrade profile.
    :vartype name: str
    :ivar type: The type of the upgrade profile.
    :vartype type: str
    :ivar control_plane_profile: The list of available upgrade versions for the control plane.
     Required.
    :vartype control_plane_profile:
     ~azure.mgmt.containerservice.models.ManagedClusterPoolUpgradeProfile
    :ivar agent_pool_profiles: The list of available upgrade versions for agent pools. Required.
    :vartype agent_pool_profiles:
     list[~azure.mgmt.containerservice.models.ManagedClusterPoolUpgradeProfile]
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "control_plane_profile": {"required": True},
        "agent_pool_profiles": {"required": True},
    }

    # Note the "properties."-prefixed keys: both profiles are nested under the
    # resource's "properties" object on the wire.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "control_plane_profile": {"key": "properties.controlPlaneProfile", "type": "ManagedClusterPoolUpgradeProfile"},
        "agent_pool_profiles": {"key": "properties.agentPoolProfiles", "type": "[ManagedClusterPoolUpgradeProfile]"},
    }

    def __init__(
        self,
        *,
        control_plane_profile: "_models.ManagedClusterPoolUpgradeProfile",
        agent_pool_profiles: list["_models.ManagedClusterPoolUpgradeProfile"],
        **kwargs: Any
    ) -> None:
        """
        :keyword control_plane_profile: The list of available upgrade versions for the control
         plane. Required.
        :paramtype control_plane_profile:
         ~azure.mgmt.containerservice.models.ManagedClusterPoolUpgradeProfile
        :keyword agent_pool_profiles: The list of available upgrade versions for agent pools.
         Required.
        :paramtype agent_pool_profiles:
         list[~azure.mgmt.containerservice.models.ManagedClusterPoolUpgradeProfile]
        """
        super().__init__(**kwargs)
        # Read-only identity fields (see _validation) are server-populated; no ctor args.
        self.id: Optional[str] = None
        self.name: Optional[str] = None
        self.type: Optional[str] = None
        self.control_plane_profile = control_plane_profile
        self.agent_pool_profiles = agent_pool_profiles
+
+
class ManagedClusterWindowsProfile(_serialization.Model):
    """Profile for Windows VMs in the managed cluster.

    All required parameters must be populated in order to send to server.

    :ivar admin_username: Name of the administrator account. Must be 1-20 characters, must not
     end in ".", and must not be a well-known reserved name such as "administrator", "admin",
     "root", "guest", "user", "test", "sql", "backup", etc. Required.
    :vartype admin_username: str
    :ivar admin_password: Password of the administrator account. Must be 8-123 characters and
     satisfy 3 of 4 complexity rules (lower-case letter, upper-case letter, digit, special
     character matching [\\W_]), and must not be a well-known weak password such as "P@ssw0rd"
     or "Password1".
    :vartype admin_password: str
    :ivar license_type: The license type to use for Windows VMs (Azure Hybrid Use Benefit).
     Known values are: "None" and "Windows_Server".
    :vartype license_type: str or ~azure.mgmt.containerservice.models.LicenseType
    :ivar enable_csi_proxy: Whether to enable CSI proxy on Windows nodes.
    :vartype enable_csi_proxy: bool
    :ivar gmsa_profile: The Windows gMSA Profile in the Managed Cluster.
    :vartype gmsa_profile: ~azure.mgmt.containerservice.models.WindowsGmsaProfile
    """

    _validation = {
        "admin_username": {"required": True},
    }

    _attribute_map = {
        "admin_username": {"key": "adminUsername", "type": "str"},
        "admin_password": {"key": "adminPassword", "type": "str"},
        "license_type": {"key": "licenseType", "type": "str"},
        "enable_csi_proxy": {"key": "enableCSIProxy", "type": "bool"},
        "gmsa_profile": {"key": "gmsaProfile", "type": "WindowsGmsaProfile"},
    }

    def __init__(
        self,
        *,
        admin_username: str,
        admin_password: Optional[str] = None,
        license_type: Optional[Union[str, "_models.LicenseType"]] = None,
        enable_csi_proxy: Optional[bool] = None,
        gmsa_profile: Optional["_models.WindowsGmsaProfile"] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword admin_username: Administrator account name (1-20 characters, must not end in
         ".", must not be a reserved name). Required.
        :paramtype admin_username: str
        :keyword admin_password: Administrator password (8-123 characters, 3 of 4 complexity
         rules, not a well-known weak password).
        :paramtype admin_password: str
        :keyword license_type: Windows VM license type. Known values are: "None" and
         "Windows_Server".
        :paramtype license_type: str or ~azure.mgmt.containerservice.models.LicenseType
        :keyword enable_csi_proxy: Whether to enable CSI proxy on Windows nodes.
        :paramtype enable_csi_proxy: bool
        :keyword gmsa_profile: The Windows gMSA Profile in the Managed Cluster.
        :paramtype gmsa_profile: ~azure.mgmt.containerservice.models.WindowsGmsaProfile
        """
        super().__init__(**kwargs)
        # Plain attribute storage; (de)serialization is driven by _attribute_map.
        self.admin_username = admin_username
        self.admin_password = admin_password
        self.license_type = license_type
        self.enable_csi_proxy = enable_csi_proxy
        self.gmsa_profile = gmsa_profile
+
+
class ManagedClusterWorkloadAutoScalerProfile(_serialization.Model):
    """Workload Auto-scaler profile for the managed cluster.

    :ivar keda: KEDA (Kubernetes Event-driven Autoscaling) settings.
    :vartype keda: ~azure.mgmt.containerservice.models.ManagedClusterWorkloadAutoScalerProfileKeda
    :ivar vertical_pod_autoscaler: VPA (Vertical Pod Autoscaler) settings.
    :vartype vertical_pod_autoscaler:
     ~azure.mgmt.containerservice.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler
    """

    _attribute_map = {
        "keda": {"key": "keda", "type": "ManagedClusterWorkloadAutoScalerProfileKeda"},
        "vertical_pod_autoscaler": {
            "key": "verticalPodAutoscaler",
            "type": "ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler",
        },
    }

    def __init__(
        self,
        *,
        keda: Optional["_models.ManagedClusterWorkloadAutoScalerProfileKeda"] = None,
        vertical_pod_autoscaler: Optional[
            "_models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler"
        ] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword keda: KEDA settings for the workload auto-scaler profile.
        :paramtype keda:
         ~azure.mgmt.containerservice.models.ManagedClusterWorkloadAutoScalerProfileKeda
        :keyword vertical_pod_autoscaler: VPA settings for the workload auto-scaler profile.
        :paramtype vertical_pod_autoscaler:
         ~azure.mgmt.containerservice.models.ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler
        """
        super().__init__(**kwargs)
        self.keda = keda
        self.vertical_pod_autoscaler = vertical_pod_autoscaler
+
+
class ManagedClusterWorkloadAutoScalerProfileKeda(_serialization.Model):  # pylint: disable=name-too-long
    """KEDA (Kubernetes Event-driven Autoscaling) settings for the workload auto-scaler profile.

    All required parameters must be populated in order to send to server.

    :ivar enabled: Whether to enable KEDA. Required.
    :vartype enabled: bool
    """

    _validation = {"enabled": {"required": True}}

    _attribute_map = {"enabled": {"key": "enabled", "type": "bool"}}

    def __init__(self, *, enabled: bool, **kwargs: Any) -> None:
        """
        :keyword enabled: Whether to enable KEDA. Required.
        :paramtype enabled: bool
        """
        super().__init__(**kwargs)
        self.enabled = enabled
+
+
class ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler(
    _serialization.Model
):  # pylint: disable=name-too-long
    """VPA (Vertical Pod Autoscaler) settings for the workload auto-scaler profile.

    All required parameters must be populated in order to send to server.

    :ivar enabled: Whether to enable VPA. Default value is false. Required.
    :vartype enabled: bool
    """

    _validation = {"enabled": {"required": True}}

    _attribute_map = {"enabled": {"key": "enabled", "type": "bool"}}

    def __init__(self, *, enabled: bool = False, **kwargs: Any) -> None:
        """
        :keyword enabled: Whether to enable VPA. Default value is false. Required.
        :paramtype enabled: bool
        """
        super().__init__(**kwargs)
        self.enabled = enabled
+
+
class ManagedNamespace(SubResource):
    """Namespace managed by ARM.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar system_data: The system metadata relating to this resource.
    :vartype system_data: ~azure.mgmt.containerservice.models.SystemData
    :ivar tags: The tags to be persisted on the managed cluster namespace.
    :vartype tags: dict[str, str]
    :ivar e_tag: Unique read-only string used to implement optimistic concurrency; it changes
     whenever the resource is updated. Pass it back in an if-match or if-none-match header per
     the normal eTag convention.
    :vartype e_tag: str
    :ivar location: The location of the namespace.
    :vartype location: str
    :ivar properties: Properties of a namespace.
    :vartype properties: ~azure.mgmt.containerservice.models.NamespaceProperties
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "system_data": {"readonly": True},
        "e_tag": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "system_data": {"key": "systemData", "type": "SystemData"},
        "tags": {"key": "tags", "type": "{str}"},
        "e_tag": {"key": "eTag", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "properties": {"key": "properties", "type": "NamespaceProperties"},
    }

    def __init__(
        self,
        *,
        tags: Optional[dict[str, str]] = None,
        location: Optional[str] = None,
        properties: Optional["_models.NamespaceProperties"] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword tags: The tags to be persisted on the managed cluster namespace.
        :paramtype tags: dict[str, str]
        :keyword location: The location of the namespace.
        :paramtype location: str
        :keyword properties: Properties of a namespace.
        :paramtype properties: ~azure.mgmt.containerservice.models.NamespaceProperties
        """
        super().__init__(**kwargs)
        # Server-populated (read-only) attributes start as None.
        self.system_data: Optional["_models.SystemData"] = None
        self.tags = tags
        self.e_tag: Optional[str] = None
        self.location = location
        self.properties = properties
+
+
class ManagedNamespaceListResult(_serialization.Model):
    """The result of a request to list managed namespaces in a managed cluster.

    :ivar value: The list of managed namespaces.
    :vartype value: list[~azure.mgmt.containerservice.models.ManagedNamespace]
    :ivar next_link: The URI to fetch the next page of results, if any.
    :vartype next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[ManagedNamespace]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(
        self,
        *,
        value: Optional[list["_models.ManagedNamespace"]] = None,
        next_link: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword value: The list of managed namespaces.
        :paramtype value: list[~azure.mgmt.containerservice.models.ManagedNamespace]
        :keyword next_link: The URI to fetch the next page of results, if any.
        :paramtype next_link: str
        """
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
+
+
class ManagedServiceIdentityUserAssignedIdentitiesValue(_serialization.Model):  # pylint: disable=name-too-long
    """Principal and client ids of a user assigned identity.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar principal_id: The principal id of user assigned identity.
    :vartype principal_id: str
    :ivar client_id: The client id of user assigned identity.
    :vartype client_id: str
    """

    _validation = {
        "principal_id": {"readonly": True},
        "client_id": {"readonly": True},
    }

    _attribute_map = {
        "principal_id": {"key": "principalId", "type": "str"},
        "client_id": {"key": "clientId", "type": "str"},
    }

    def __init__(self, **kwargs: Any) -> None:
        """ """
        super().__init__(**kwargs)
        # Both attributes are server-populated and read-only.
        self.principal_id: Optional[str] = None
        self.client_id: Optional[str] = None
+
+
class ManualScaleProfile(_serialization.Model):
    """Specifications on number of machines.

    :ivar size: VM size that AKS will use when creating and scaling, e.g. 'Standard_E4s_v3',
     'Standard_E16s_v3' or 'Standard_D16s_v5'.
    :vartype size: str
    :ivar count: Number of nodes.
    :vartype count: int
    """

    _attribute_map = {
        "size": {"key": "size", "type": "str"},
        "count": {"key": "count", "type": "int"},
    }

    def __init__(self, *, size: Optional[str] = None, count: Optional[int] = None, **kwargs: Any) -> None:
        """
        :keyword size: VM size that AKS will use when creating and scaling, e.g.
         'Standard_E4s_v3', 'Standard_E16s_v3' or 'Standard_D16s_v5'.
        :paramtype size: str
        :keyword count: Number of nodes.
        :paramtype count: int
        """
        super().__init__(**kwargs)
        self.size = size
        self.count = count
+
+
class MeshRevision(_serialization.Model):
    """Holds information on upgrades and compatibility for a given major.minor mesh release.

    :ivar revision: The revision of the mesh release.
    :vartype revision: str
    :ivar upgrades: List of revisions available for upgrade of a specific mesh revision.
    :vartype upgrades: list[str]
    :ivar compatible_with: List of items this revision of service mesh is compatible with, and
     their associated versions.
    :vartype compatible_with: list[~azure.mgmt.containerservice.models.CompatibleVersions]
    """

    _attribute_map = {
        "revision": {"key": "revision", "type": "str"},
        "upgrades": {"key": "upgrades", "type": "[str]"},
        "compatible_with": {"key": "compatibleWith", "type": "[CompatibleVersions]"},
    }

    def __init__(
        self,
        *,
        revision: Optional[str] = None,
        upgrades: Optional[list[str]] = None,
        compatible_with: Optional[list["_models.CompatibleVersions"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword revision: The revision of the mesh release.
        :paramtype revision: str
        :keyword upgrades: List of revisions available for upgrade of a specific mesh revision.
        :paramtype upgrades: list[str]
        :keyword compatible_with: List of items this revision of service mesh is compatible with,
         and their associated versions.
        :paramtype compatible_with: list[~azure.mgmt.containerservice.models.CompatibleVersions]
        """
        super().__init__(**kwargs)
        self.revision = revision
        self.upgrades = upgrades
        self.compatible_with = compatible_with
+
+
class ProxyResource(Resource):
    """The resource model definition for an Azure Resource Manager proxy resource. It has no tags
    and no location.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. E.g.
     "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}".
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~azure.mgmt.containerservice.models.SystemData
    """
+
+
class MeshRevisionProfile(ProxyResource):
    """Mesh revision profile for a mesh.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. E.g.
     "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}".
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~azure.mgmt.containerservice.models.SystemData
    :ivar properties: Mesh revision profile properties for a mesh.
    :vartype properties: ~azure.mgmt.containerservice.models.MeshRevisionProfileProperties
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "system_data": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "system_data": {"key": "systemData", "type": "SystemData"},
        "properties": {"key": "properties", "type": "MeshRevisionProfileProperties"},
    }

    def __init__(self, *, properties: Optional["_models.MeshRevisionProfileProperties"] = None, **kwargs: Any) -> None:
        """
        :keyword properties: Mesh revision profile properties for a mesh.
        :paramtype properties: ~azure.mgmt.containerservice.models.MeshRevisionProfileProperties
        """
        super().__init__(**kwargs)
        self.properties = properties
+
+
class MeshRevisionProfileList(_serialization.Model):
    """Holds an array of MeshRevisionsProfiles.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: Array of service mesh add-on revision profiles for all supported mesh modes.
    :vartype value: list[~azure.mgmt.containerservice.models.MeshRevisionProfile]
    :ivar next_link: The URL to get the next set of mesh revision profile.
    :vartype next_link: str
    """

    _validation = {"next_link": {"readonly": True}}

    _attribute_map = {
        "value": {"key": "value", "type": "[MeshRevisionProfile]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: Optional[list["_models.MeshRevisionProfile"]] = None, **kwargs: Any) -> None:
        """
        :keyword value: Array of service mesh add-on revision profiles for all supported mesh
         modes.
        :paramtype value: list[~azure.mgmt.containerservice.models.MeshRevisionProfile]
        """
        super().__init__(**kwargs)
        self.value = value
        # Pagination link is server-populated and read-only.
        self.next_link: Optional[str] = None
+
+
class MeshRevisionProfileProperties(_serialization.Model):
    """Mesh revision profile properties for a mesh.

    :ivar mesh_revisions: The mesh revisions for this profile.
    :vartype mesh_revisions: list[~azure.mgmt.containerservice.models.MeshRevision]
    """

    _attribute_map = {"mesh_revisions": {"key": "meshRevisions", "type": "[MeshRevision]"}}

    def __init__(self, *, mesh_revisions: Optional[list["_models.MeshRevision"]] = None, **kwargs: Any) -> None:
        """
        :keyword mesh_revisions: The mesh revisions for this profile.
        :paramtype mesh_revisions: list[~azure.mgmt.containerservice.models.MeshRevision]
        """
        super().__init__(**kwargs)
        self.mesh_revisions = mesh_revisions
+
+
class MeshUpgradeProfile(ProxyResource):
    """Upgrade profile for given mesh.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. E.g.
     "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}".
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~azure.mgmt.containerservice.models.SystemData
    :ivar properties: Mesh upgrade profile properties for a major.minor release.
    :vartype properties: ~azure.mgmt.containerservice.models.MeshUpgradeProfileProperties
    """

    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "type": {"readonly": True},
        "system_data": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "system_data": {"key": "systemData", "type": "SystemData"},
        "properties": {"key": "properties", "type": "MeshUpgradeProfileProperties"},
    }

    def __init__(self, *, properties: Optional["_models.MeshUpgradeProfileProperties"] = None, **kwargs: Any) -> None:
        """
        :keyword properties: Mesh upgrade profile properties for a major.minor release.
        :paramtype properties: ~azure.mgmt.containerservice.models.MeshUpgradeProfileProperties
        """
        super().__init__(**kwargs)
        self.properties = properties
+
+
class MeshUpgradeProfileList(_serialization.Model):
    """Holds an array of MeshUpgradeProfiles.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: Array of supported service mesh add-on upgrade profiles.
    :vartype value: list[~azure.mgmt.containerservice.models.MeshUpgradeProfile]
    :ivar next_link: The URL to get the next set of mesh upgrade profile.
    :vartype next_link: str
    """

    _validation = {"next_link": {"readonly": True}}

    _attribute_map = {
        "value": {"key": "value", "type": "[MeshUpgradeProfile]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: Optional[list["_models.MeshUpgradeProfile"]] = None, **kwargs: Any) -> None:
        """
        :keyword value: Array of supported service mesh add-on upgrade profiles.
        :paramtype value: list[~azure.mgmt.containerservice.models.MeshUpgradeProfile]
        """
        super().__init__(**kwargs)
        self.value = value
        # Pagination link is server-populated and read-only.
        self.next_link: Optional[str] = None
+
+
class MeshUpgradeProfileProperties(MeshRevision):
    """Mesh upgrade profile properties for a major.minor release.

    Inherits all fields from :class:`MeshRevision` unchanged.

    :ivar revision: The revision of the mesh release.
    :vartype revision: str
    :ivar upgrades: List of revisions available for upgrade of a specific mesh revision.
    :vartype upgrades: list[str]
    :ivar compatible_with: List of items this revision of service mesh is compatible with, and
     their associated versions.
    :vartype compatible_with: list[~azure.mgmt.containerservice.models.CompatibleVersions]
    """
+
+
class NamespaceProperties(_serialization.Model):
    """Properties of a namespace managed by ARM.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar provisioning_state: The current provisioning state of the namespace. Known values are:
     "Updating", "Deleting", "Creating", "Succeeded", "Failed", and "Canceled".
    :vartype provisioning_state: str or
     ~azure.mgmt.containerservice.models.NamespaceProvisioningState
    :ivar labels: The labels of managed namespace.
    :vartype labels: dict[str, str]
    :ivar annotations: The annotations of managed namespace.
    :vartype annotations: dict[str, str]
    :ivar portal_fqdn: The special FQDN used by the Azure Portal to access the Managed Cluster.
     For Azure Portal use only: it adds the CORS headers the Portal needs and the Kubernetes
     APIServer does not send by default.
    :vartype portal_fqdn: str
    :ivar default_resource_quota: The default resource quota enforced upon the namespace.
     Customers can add further Kubernetes resource quota objects; quotas are additive, so the
     effective limit is one satisfying all quotas on the namespace.
    :vartype default_resource_quota: ~azure.mgmt.containerservice.models.ResourceQuota
    :ivar default_network_policy: The default network policy enforced upon the namespace.
     Customers can add further Kubernetes network policy objects; policies are additive, so the
     connections allowed in a direction are the union of what all applicable policies allow.
    :vartype default_network_policy: ~azure.mgmt.containerservice.models.NetworkPolicies
    :ivar adoption_policy: Action if Kubernetes namespace with same name already exists. Known
     values are: "Never", "IfIdentical", and "Always".
    :vartype adoption_policy: str or ~azure.mgmt.containerservice.models.AdoptionPolicy
    :ivar delete_policy: Delete options of a namespace. Known values are: "Keep" and "Delete".
    :vartype delete_policy: str or ~azure.mgmt.containerservice.models.DeletePolicy
    """

    _validation = {
        "provisioning_state": {"readonly": True},
        "portal_fqdn": {"readonly": True},
    }

    _attribute_map = {
        "provisioning_state": {"key": "provisioningState", "type": "str"},
        "labels": {"key": "labels", "type": "{str}"},
        "annotations": {"key": "annotations", "type": "{str}"},
        "portal_fqdn": {"key": "portalFqdn", "type": "str"},
        "default_resource_quota": {"key": "defaultResourceQuota", "type": "ResourceQuota"},
        "default_network_policy": {"key": "defaultNetworkPolicy", "type": "NetworkPolicies"},
        "adoption_policy": {"key": "adoptionPolicy", "type": "str"},
        "delete_policy": {"key": "deletePolicy", "type": "str"},
    }

    def __init__(
        self,
        *,
        labels: Optional[dict[str, str]] = None,
        annotations: Optional[dict[str, str]] = None,
        default_resource_quota: Optional["_models.ResourceQuota"] = None,
        default_network_policy: Optional["_models.NetworkPolicies"] = None,
        adoption_policy: Optional[Union[str, "_models.AdoptionPolicy"]] = None,
        delete_policy: Optional[Union[str, "_models.DeletePolicy"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword labels: The labels of managed namespace.
        :paramtype labels: dict[str, str]
        :keyword annotations: The annotations of managed namespace.
        :paramtype annotations: dict[str, str]
        :keyword default_resource_quota: The default resource quota enforced upon the namespace
         (additive with any other resource quota objects on the namespace).
        :paramtype default_resource_quota: ~azure.mgmt.containerservice.models.ResourceQuota
        :keyword default_network_policy: The default network policy enforced upon the namespace
         (additive with any other network policy objects on the namespace).
        :paramtype default_network_policy: ~azure.mgmt.containerservice.models.NetworkPolicies
        :keyword adoption_policy: Action if Kubernetes namespace with same name already exists.
         Known values are: "Never", "IfIdentical", and "Always".
        :paramtype adoption_policy: str or ~azure.mgmt.containerservice.models.AdoptionPolicy
        :keyword delete_policy: Delete options of a namespace. Known values are: "Keep" and
         "Delete".
        :paramtype delete_policy: str or ~azure.mgmt.containerservice.models.DeletePolicy
        """
        super().__init__(**kwargs)
        # Server-populated (read-only) attributes start as None.
        self.provisioning_state: Optional[Union[str, "_models.NamespaceProvisioningState"]] = None
        self.labels = labels
        self.annotations = annotations
        self.portal_fqdn: Optional[str] = None
        self.default_resource_quota = default_resource_quota
        self.default_network_policy = default_network_policy
        self.adoption_policy = adoption_policy
        self.delete_policy = delete_policy
+
+
class NetworkPolicies(_serialization.Model):
    """Default network policy of the namespace, specifying ingress and egress rules.

    :ivar ingress: Ingress policy for the network. Known values are: "DenyAll", "AllowAll", and
     "AllowSameNamespace".
    :vartype ingress: str or ~azure.mgmt.containerservice.models.PolicyRule
    :ivar egress: Egress policy for the network. Known values are: "DenyAll", "AllowAll", and
     "AllowSameNamespace".
    :vartype egress: str or ~azure.mgmt.containerservice.models.PolicyRule
    """

    _attribute_map = {
        "ingress": {"key": "ingress", "type": "str"},
        "egress": {"key": "egress", "type": "str"},
    }

    def __init__(
        self,
        *,
        ingress: Optional[Union[str, "_models.PolicyRule"]] = None,
        egress: Optional[Union[str, "_models.PolicyRule"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword ingress: Ingress policy for the network. Known values are: "DenyAll",
         "AllowAll", and "AllowSameNamespace".
        :paramtype ingress: str or ~azure.mgmt.containerservice.models.PolicyRule
        :keyword egress: Egress policy for the network. Known values are: "DenyAll", "AllowAll",
         and "AllowSameNamespace".
        :paramtype egress: str or ~azure.mgmt.containerservice.models.PolicyRule
        """
        super().__init__(**kwargs)
        self.ingress = ingress
        self.egress = egress
+
+
class OperationListResult(_serialization.Model):
    """The List Operation response.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: The list of operations.
    :vartype value: list[~azure.mgmt.containerservice.models.OperationValue]
    """

    _validation = {"value": {"readonly": True}}

    _attribute_map = {"value": {"key": "value", "type": "[OperationValue]"}}

    def __init__(self, **kwargs: Any) -> None:
        """ """
        super().__init__(**kwargs)
        # Operation list is server-populated and read-only.
        self.value: Optional[list["_models.OperationValue"]] = None
+
+
class OperationValue(_serialization.Model):
    """Describes the properties of a Operation value.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar origin: The origin of the operation.
    :vartype origin: str
    :ivar name: The name of the operation.
    :vartype name: str
    :ivar operation: The display name of the operation.
    :vartype operation: str
    :ivar resource: The display name of the resource the operation applies to.
    :vartype resource: str
    :ivar description: The description of the operation.
    :vartype description: str
    :ivar provider: The resource provider for the operation.
    :vartype provider: str
    """

    _validation = {
        "origin": {"readonly": True},
        "name": {"readonly": True},
        "operation": {"readonly": True},
        "resource": {"readonly": True},
        "description": {"readonly": True},
        "provider": {"readonly": True},
    }

    # The "display.*" keys flatten the nested "display" object in the wire format.
    _attribute_map = {
        "origin": {"key": "origin", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "operation": {"key": "display.operation", "type": "str"},
        "resource": {"key": "display.resource", "type": "str"},
        "description": {"key": "display.description", "type": "str"},
        "provider": {"key": "display.provider", "type": "str"},
    }

    def __init__(self, **kwargs: Any) -> None:
        """ """
        super().__init__(**kwargs)
        # All attributes are server-populated and read-only.
        self.origin: Optional[str] = None
        self.name: Optional[str] = None
        self.operation: Optional[str] = None
        self.resource: Optional[str] = None
        self.description: Optional[str] = None
        self.provider: Optional[str] = None
+
+
class OutboundEnvironmentEndpoint(_serialization.Model):
    """Egress endpoints which AKS agent nodes connect to for common purpose.

    :ivar category: The category of endpoints accessed by the AKS agent node, e.g.
     azure-resource-management, apiserver, etc.
    :vartype category: str
    :ivar endpoints: The endpoints that AKS agent nodes connect to.
    :vartype endpoints: list[~azure.mgmt.containerservice.models.EndpointDependency]
    """

    _attribute_map = {
        "category": {"key": "category", "type": "str"},
        "endpoints": {"key": "endpoints", "type": "[EndpointDependency]"},
    }

    def __init__(
        self,
        *,
        category: Optional[str] = None,
        endpoints: Optional[list["_models.EndpointDependency"]] = None,
        **kwargs: Any
    ) -> None:
        """
        :keyword category: The category of endpoints accessed by the AKS agent node, e.g.
         azure-resource-management, apiserver, etc.
        :paramtype category: str
        :keyword endpoints: The endpoints that AKS agent nodes connect to.
        :paramtype endpoints: list[~azure.mgmt.containerservice.models.EndpointDependency]
        """
        super().__init__(**kwargs)
        self.category = category
        self.endpoints = endpoints
+
+
class OutboundEnvironmentEndpointCollection(_serialization.Model):
    """Collection of OutboundEnvironmentEndpoint.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to server.

    :ivar value: Collection of resources. Required.
    :vartype value: list[~azure.mgmt.containerservice.models.OutboundEnvironmentEndpoint]
    :ivar next_link: Link to next page of resources.
    :vartype next_link: str
    """

    _validation = {
        "value": {"required": True},
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[OutboundEnvironmentEndpoint]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, *, value: list["_models.OutboundEnvironmentEndpoint"], **kwargs: Any) -> None:
        """
        :keyword value: Collection of resources. Required.
        :paramtype value: list[~azure.mgmt.containerservice.models.OutboundEnvironmentEndpoint]
        """
        super().__init__(**kwargs)
        self.value = value
        # Pagination link is server-populated and read-only.
        self.next_link: Optional[str] = None
+
+
+class PortRange(_serialization.Model):
+    """The port range.
+
+    :ivar port_start: The minimum port that is included in the range. It should be ranged from 1 to
+     65535, and be less than or equal to portEnd.
+    :vartype port_start: int
+    :ivar port_end: The maximum port that is included in the range. It should be ranged from 1 to
+     65535, and be greater than or equal to portStart.
+    :vartype port_end: int
+    :ivar protocol: The network protocol of the port. Known values are: "TCP" and "UDP".
+    :vartype protocol: str or ~azure.mgmt.containerservice.models.Protocol
+    """
+
+    # Field constraints: both ends of the range must be valid TCP/UDP port numbers (1-65535).
+    # Note the cross-field constraint (portStart <= portEnd) is documented only; it is not
+    # expressed in this metadata.
+    _validation = {
+        "port_start": {"maximum": 65535, "minimum": 1},
+        "port_end": {"maximum": 65535, "minimum": 1},
+    }
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    _attribute_map = {
+        "port_start": {"key": "portStart", "type": "int"},
+        "port_end": {"key": "portEnd", "type": "int"},
+        "protocol": {"key": "protocol", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        port_start: Optional[int] = None,
+        port_end: Optional[int] = None,
+        protocol: Optional[Union[str, "_models.Protocol"]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword port_start: The minimum port that is included in the range. It should be ranged from 1
+         to 65535, and be less than or equal to portEnd.
+        :paramtype port_start: int
+        :keyword port_end: The maximum port that is included in the range. It should be ranged from 1
+         to 65535, and be greater than or equal to portStart.
+        :paramtype port_end: int
+        :keyword protocol: The network protocol of the port. Known values are: "TCP" and "UDP".
+        :paramtype protocol: str or ~azure.mgmt.containerservice.models.Protocol
+        """
+        super().__init__(**kwargs)
+        self.port_start = port_start
+        self.port_end = port_end
+        self.protocol = protocol
+
+
+class PowerState(_serialization.Model):
+    """Describes the Power State of the cluster.
+
+    :ivar code: Tells whether the cluster is Running or Stopped. Known values are: "Running" and
+     "Stopped".
+    :vartype code: str or ~azure.mgmt.containerservice.models.Code
+    """
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    _attribute_map = {
+        "code": {"key": "code", "type": "str"},
+    }
+
+    def __init__(self, *, code: Optional[Union[str, "_models.Code"]] = None, **kwargs: Any) -> None:
+        """
+        :keyword code: Tells whether the cluster is Running or Stopped. Known values are: "Running" and
+         "Stopped".
+        :paramtype code: str or ~azure.mgmt.containerservice.models.Code
+        """
+        super().__init__(**kwargs)
+        self.code = code
+
+
+class PrivateEndpoint(_serialization.Model):
+    """Private endpoint which a connection belongs to.
+
+    :ivar id: The resource ID of the private endpoint.
+    :vartype id: str
+    """
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    _attribute_map = {
+        "id": {"key": "id", "type": "str"},
+    }
+
+    # "id" intentionally shadows the builtin to mirror the REST wire name.
+    def __init__(self, *, id: Optional[str] = None, **kwargs: Any) -> None:  # pylint: disable=redefined-builtin
+        """
+        :keyword id: The resource ID of the private endpoint.
+        :paramtype id: str
+        """
+        super().__init__(**kwargs)
+        self.id = id
+
+
+class PrivateEndpointConnection(_serialization.Model):
+    """A private endpoint connection.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar id: The ID of the private endpoint connection.
+    :vartype id: str
+    :ivar name: The name of the private endpoint connection.
+    :vartype name: str
+    :ivar type: The resource type.
+    :vartype type: str
+    :ivar provisioning_state: The current provisioning state. Known values are: "Canceled",
+     "Creating", "Deleting", "Failed", and "Succeeded".
+    :vartype provisioning_state: str or
+     ~azure.mgmt.containerservice.models.PrivateEndpointConnectionProvisioningState
+    :ivar private_endpoint: The resource of private endpoint.
+    :vartype private_endpoint: ~azure.mgmt.containerservice.models.PrivateEndpoint
+    :ivar private_link_service_connection_state: A collection of information about the state of the
+     connection between service consumer and provider.
+    :vartype private_link_service_connection_state:
+     ~azure.mgmt.containerservice.models.PrivateLinkServiceConnectionState
+    """
+
+    # Field constraints: these attributes are server-populated and never sent in requests.
+    _validation = {
+        "id": {"readonly": True},
+        "name": {"readonly": True},
+        "type": {"readonly": True},
+        "provisioning_state": {"readonly": True},
+    }
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    # Dotted keys (e.g. "properties.provisioningState") flatten the nested "properties" bag.
+    _attribute_map = {
+        "id": {"key": "id", "type": "str"},
+        "name": {"key": "name", "type": "str"},
+        "type": {"key": "type", "type": "str"},
+        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
+        "private_endpoint": {"key": "properties.privateEndpoint", "type": "PrivateEndpoint"},
+        "private_link_service_connection_state": {
+            "key": "properties.privateLinkServiceConnectionState",
+            "type": "PrivateLinkServiceConnectionState",
+        },
+    }
+
+    def __init__(
+        self,
+        *,
+        private_endpoint: Optional["_models.PrivateEndpoint"] = None,
+        private_link_service_connection_state: Optional["_models.PrivateLinkServiceConnectionState"] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword private_endpoint: The resource of private endpoint.
+        :paramtype private_endpoint: ~azure.mgmt.containerservice.models.PrivateEndpoint
+        :keyword private_link_service_connection_state: A collection of information about the state of
+         the connection between service consumer and provider.
+        :paramtype private_link_service_connection_state:
+         ~azure.mgmt.containerservice.models.PrivateLinkServiceConnectionState
+        """
+        super().__init__(**kwargs)
+        # Read-only fields start as None on the client; populated from server responses.
+        self.id: Optional[str] = None
+        self.name: Optional[str] = None
+        self.type: Optional[str] = None
+        self.provisioning_state: Optional[Union[str, "_models.PrivateEndpointConnectionProvisioningState"]] = None
+        self.private_endpoint = private_endpoint
+        self.private_link_service_connection_state = private_link_service_connection_state
+
+
+class PrivateEndpointConnectionListResult(_serialization.Model):
+    """A list of private endpoint connections.
+
+    :ivar value: The collection value.
+    :vartype value: list[~azure.mgmt.containerservice.models.PrivateEndpointConnection]
+    """
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    _attribute_map = {
+        "value": {"key": "value", "type": "[PrivateEndpointConnection]"},
+    }
+
+    def __init__(self, *, value: Optional[list["_models.PrivateEndpointConnection"]] = None, **kwargs: Any) -> None:
+        """
+        :keyword value: The collection value.
+        :paramtype value: list[~azure.mgmt.containerservice.models.PrivateEndpointConnection]
+        """
+        super().__init__(**kwargs)
+        self.value = value
+
+
+class PrivateLinkResource(_serialization.Model):
+    """A private link resource.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar id: The ID of the private link resource.
+    :vartype id: str
+    :ivar name: The name of the private link resource.
+    :vartype name: str
+    :ivar type: The resource type.
+    :vartype type: str
+    :ivar group_id: The group ID of the resource.
+    :vartype group_id: str
+    :ivar required_members: The RequiredMembers of the resource.
+    :vartype required_members: list[str]
+    :ivar private_link_service_id: The private link service ID of the resource, this field is
+     exposed only to NRP internally.
+    :vartype private_link_service_id: str
+    """
+
+    # Field constraints: only the service ID is server-populated/readonly here.
+    _validation = {
+        "private_link_service_id": {"readonly": True},
+    }
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    _attribute_map = {
+        "id": {"key": "id", "type": "str"},
+        "name": {"key": "name", "type": "str"},
+        "type": {"key": "type", "type": "str"},
+        "group_id": {"key": "groupId", "type": "str"},
+        "required_members": {"key": "requiredMembers", "type": "[str]"},
+        "private_link_service_id": {"key": "privateLinkServiceID", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        id: Optional[str] = None,  # pylint: disable=redefined-builtin
+        name: Optional[str] = None,
+        type: Optional[str] = None,
+        group_id: Optional[str] = None,
+        required_members: Optional[list[str]] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword id: The ID of the private link resource.
+        :paramtype id: str
+        :keyword name: The name of the private link resource.
+        :paramtype name: str
+        :keyword type: The resource type.
+        :paramtype type: str
+        :keyword group_id: The group ID of the resource.
+        :paramtype group_id: str
+        :keyword required_members: The RequiredMembers of the resource.
+        :paramtype required_members: list[str]
+        """
+        super().__init__(**kwargs)
+        self.id = id
+        self.name = name
+        self.type = type
+        self.group_id = group_id
+        self.required_members = required_members
+        # Read-only, server-populated (NRP-internal) field; not a constructor keyword.
+        self.private_link_service_id: Optional[str] = None
+
+
+class PrivateLinkResourcesListResult(_serialization.Model):
+    """A list of private link resources.
+
+    :ivar value: The collection value.
+    :vartype value: list[~azure.mgmt.containerservice.models.PrivateLinkResource]
+    """
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    _attribute_map = {
+        "value": {"key": "value", "type": "[PrivateLinkResource]"},
+    }
+
+    def __init__(self, *, value: Optional[list["_models.PrivateLinkResource"]] = None, **kwargs: Any) -> None:
+        """
+        :keyword value: The collection value.
+        :paramtype value: list[~azure.mgmt.containerservice.models.PrivateLinkResource]
+        """
+        super().__init__(**kwargs)
+        self.value = value
+
+
+class PrivateLinkServiceConnectionState(_serialization.Model):
+    """The state of a private link service connection.
+
+    :ivar status: The private link service connection status. Known values are: "Pending",
+     "Approved", "Rejected", and "Disconnected".
+    :vartype status: str or ~azure.mgmt.containerservice.models.ConnectionStatus
+    :ivar description: The private link service connection description.
+    :vartype description: str
+    """
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    _attribute_map = {
+        "status": {"key": "status", "type": "str"},
+        "description": {"key": "description", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        status: Optional[Union[str, "_models.ConnectionStatus"]] = None,
+        description: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword status: The private link service connection status. Known values are: "Pending",
+         "Approved", "Rejected", and "Disconnected".
+        :paramtype status: str or ~azure.mgmt.containerservice.models.ConnectionStatus
+        :keyword description: The private link service connection description.
+        :paramtype description: str
+        """
+        super().__init__(**kwargs)
+        self.status = status
+        self.description = description
+
+
+class RelativeMonthlySchedule(_serialization.Model):
+    """For schedules like: 'recur every month on the first Monday' or 'recur every 3 months on last
+    Friday'.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar interval_months: Specifies the number of months between each set of occurrences.
+     Required.
+    :vartype interval_months: int
+    :ivar week_index: The week index. Specifies on which week of the month the dayOfWeek applies.
+     Required. Known values are: "First", "Second", "Third", "Fourth", and "Last".
+    :vartype week_index: str or ~azure.mgmt.containerservice.models.Type
+    :ivar day_of_week: Specifies on which day of the week the maintenance occurs. Required. Known
+     values are: "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", and "Saturday".
+    :vartype day_of_week: str or ~azure.mgmt.containerservice.models.WeekDay
+    """
+
+    # Field constraints: all three fields are required; the interval is capped at 6 months.
+    _validation = {
+        "interval_months": {"required": True, "maximum": 6, "minimum": 1},
+        "week_index": {"required": True},
+        "day_of_week": {"required": True},
+    }
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    _attribute_map = {
+        "interval_months": {"key": "intervalMonths", "type": "int"},
+        "week_index": {"key": "weekIndex", "type": "str"},
+        "day_of_week": {"key": "dayOfWeek", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        interval_months: int,
+        week_index: Union[str, "_models.Type"],
+        day_of_week: Union[str, "_models.WeekDay"],
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword interval_months: Specifies the number of months between each set of occurrences.
+         Required.
+        :paramtype interval_months: int
+        :keyword week_index: The week index. Specifies on which week of the month the dayOfWeek
+         applies. Required. Known values are: "First", "Second", "Third", "Fourth", and "Last".
+        :paramtype week_index: str or ~azure.mgmt.containerservice.models.Type
+        :keyword day_of_week: Specifies on which day of the week the maintenance occurs. Required.
+         Known values are: "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", and
+         "Saturday".
+        :paramtype day_of_week: str or ~azure.mgmt.containerservice.models.WeekDay
+        """
+        super().__init__(**kwargs)
+        self.interval_months = interval_months
+        self.week_index = week_index
+        self.day_of_week = day_of_week
+
+
+class ResourceQuota(_serialization.Model):
+    """Resource quota for the namespace.
+
+    :ivar cpu_request: CPU request of the namespace in one-thousandth CPU form. See `CPU resource
+     units
+     `_
+     for more details.
+    :vartype cpu_request: str
+    :ivar cpu_limit: CPU limit of the namespace in one-thousandth CPU form. See `CPU resource units
+     `_
+     for more details.
+    :vartype cpu_limit: str
+    :ivar memory_request: Memory request of the namespace in the power-of-two equivalents form: Ei,
+     Pi, Ti, Gi, Mi, Ki. See `Memory resource units
+     `_
+     for more details.
+    :vartype memory_request: str
+    :ivar memory_limit: Memory limit of the namespace in the power-of-two equivalents form: Ei, Pi,
+     Ti, Gi, Mi, Ki. See `Memory resource units
+     `_
+     for more details.
+    :vartype memory_limit: str
+    """
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    # All quantities are Kubernetes-style quantity strings (e.g. "500m", "1Gi"), hence type "str".
+    _attribute_map = {
+        "cpu_request": {"key": "cpuRequest", "type": "str"},
+        "cpu_limit": {"key": "cpuLimit", "type": "str"},
+        "memory_request": {"key": "memoryRequest", "type": "str"},
+        "memory_limit": {"key": "memoryLimit", "type": "str"},
+    }
+
+    def __init__(
+        self,
+        *,
+        cpu_request: Optional[str] = None,
+        cpu_limit: Optional[str] = None,
+        memory_request: Optional[str] = None,
+        memory_limit: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword cpu_request: CPU request of the namespace in one-thousandth CPU form. See `CPU
+         resource units
+         `_
+         for more details.
+        :paramtype cpu_request: str
+        :keyword cpu_limit: CPU limit of the namespace in one-thousandth CPU form. See `CPU resource
+         units
+         `_
+         for more details.
+        :paramtype cpu_limit: str
+        :keyword memory_request: Memory request of the namespace in the power-of-two equivalents form:
+         Ei, Pi, Ti, Gi, Mi, Ki. See `Memory resource units
+         `_
+         for more details.
+        :paramtype memory_request: str
+        :keyword memory_limit: Memory limit of the namespace in the power-of-two equivalents form: Ei,
+         Pi, Ti, Gi, Mi, Ki. See `Memory resource units
+         `_
+         for more details.
+        :paramtype memory_limit: str
+        """
+        super().__init__(**kwargs)
+        self.cpu_request = cpu_request
+        self.cpu_limit = cpu_limit
+        self.memory_request = memory_request
+        self.memory_limit = memory_limit
+
+
+class ResourceReference(_serialization.Model):
+    """A reference to an Azure resource.
+
+    :ivar id: The fully qualified Azure resource id.
+    :vartype id: str
+    """
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    _attribute_map = {
+        "id": {"key": "id", "type": "str"},
+    }
+
+    # "id" intentionally shadows the builtin to mirror the REST wire name.
+    def __init__(self, *, id: Optional[str] = None, **kwargs: Any) -> None:  # pylint: disable=redefined-builtin
+        """
+        :keyword id: The fully qualified Azure resource id.
+        :paramtype id: str
+        """
+        super().__init__(**kwargs)
+        self.id = id
+
+
+class RunCommandRequest(_serialization.Model):
+    """A run command request.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar command: The command to run. Required.
+    :vartype command: str
+    :ivar context: A base64 encoded zip file containing the files required by the command.
+    :vartype context: str
+    :ivar cluster_token: AuthToken issued for AKS AAD Server App.
+    :vartype cluster_token: str
+    """
+
+    # Field constraints: only the command itself is mandatory.
+    _validation = {
+        "command": {"required": True},
+    }
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    _attribute_map = {
+        "command": {"key": "command", "type": "str"},
+        "context": {"key": "context", "type": "str"},
+        "cluster_token": {"key": "clusterToken", "type": "str"},
+    }
+
+    def __init__(
+        self, *, command: str, context: Optional[str] = None, cluster_token: Optional[str] = None, **kwargs: Any
+    ) -> None:
+        """
+        :keyword command: The command to run. Required.
+        :paramtype command: str
+        :keyword context: A base64 encoded zip file containing the files required by the command.
+        :paramtype context: str
+        :keyword cluster_token: AuthToken issued for AKS AAD Server App.
+        :paramtype cluster_token: str
+        """
+        super().__init__(**kwargs)
+        self.command = command
+        self.context = context
+        self.cluster_token = cluster_token
+
+
+class RunCommandResult(_serialization.Model):
+    """run command result.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar id: The command id.
+    :vartype id: str
+    :ivar provisioning_state: provisioning State.
+    :vartype provisioning_state: str
+    :ivar exit_code: The exit code of the command.
+    :vartype exit_code: int
+    :ivar started_at: The time when the command started.
+    :vartype started_at: ~datetime.datetime
+    :ivar finished_at: The time when the command finished.
+    :vartype finished_at: ~datetime.datetime
+    :ivar logs: The command output.
+    :vartype logs: str
+    :ivar reason: An explanation of why provisioningState is set to failed (if so).
+    :vartype reason: str
+    """
+
+    # Field constraints: every field is server-populated; nothing is ever sent in requests.
+    _validation = {
+        "id": {"readonly": True},
+        "provisioning_state": {"readonly": True},
+        "exit_code": {"readonly": True},
+        "started_at": {"readonly": True},
+        "finished_at": {"readonly": True},
+        "logs": {"readonly": True},
+        "reason": {"readonly": True},
+    }
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    # Dotted keys flatten the nested "properties" bag; timestamps use ISO-8601 wire format.
+    _attribute_map = {
+        "id": {"key": "id", "type": "str"},
+        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
+        "exit_code": {"key": "properties.exitCode", "type": "int"},
+        "started_at": {"key": "properties.startedAt", "type": "iso-8601"},
+        "finished_at": {"key": "properties.finishedAt", "type": "iso-8601"},
+        "logs": {"key": "properties.logs", "type": "str"},
+        "reason": {"key": "properties.reason", "type": "str"},
+    }
+
+    def __init__(self, **kwargs: Any) -> None:
+        """All properties are read-only and populated by the server; no keyword arguments."""
+        super().__init__(**kwargs)
+        self.id: Optional[str] = None
+        self.provisioning_state: Optional[str] = None
+        self.exit_code: Optional[int] = None
+        self.started_at: Optional[datetime.datetime] = None
+        self.finished_at: Optional[datetime.datetime] = None
+        self.logs: Optional[str] = None
+        self.reason: Optional[str] = None
+
+
+class ScaleProfile(_serialization.Model):
+    """Specifications on how to scale a VirtualMachines agent pool.
+
+    :ivar manual: Specifications on how to scale the VirtualMachines agent pool to a fixed size.
+    :vartype manual: list[~azure.mgmt.containerservice.models.ManualScaleProfile]
+    """
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    _attribute_map = {
+        "manual": {"key": "manual", "type": "[ManualScaleProfile]"},
+    }
+
+    def __init__(self, *, manual: Optional[list["_models.ManualScaleProfile"]] = None, **kwargs: Any) -> None:
+        """
+        :keyword manual: Specifications on how to scale the VirtualMachines agent pool to a fixed size.
+        :paramtype manual: list[~azure.mgmt.containerservice.models.ManualScaleProfile]
+        """
+        super().__init__(**kwargs)
+        self.manual = manual
+
+
+class Schedule(_serialization.Model):
+    """One and only one of the schedule types should be specified. Choose either 'daily', 'weekly',
+    'absoluteMonthly' or 'relativeMonthly' for your maintenance schedule.
+
+    :ivar daily: For schedules like: 'recur every day' or 'recur every 3 days'.
+    :vartype daily: ~azure.mgmt.containerservice.models.DailySchedule
+    :ivar weekly: For schedules like: 'recur every Monday' or 'recur every 3 weeks on Wednesday'.
+    :vartype weekly: ~azure.mgmt.containerservice.models.WeeklySchedule
+    :ivar absolute_monthly: For schedules like: 'recur every month on the 15th' or 'recur every 3
+     months on the 20th'.
+    :vartype absolute_monthly: ~azure.mgmt.containerservice.models.AbsoluteMonthlySchedule
+    :ivar relative_monthly: For schedules like: 'recur every month on the first Monday' or 'recur
+     every 3 months on last Friday'.
+    :vartype relative_monthly: ~azure.mgmt.containerservice.models.RelativeMonthlySchedule
+    """
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    # The "exactly one of" constraint is documented only; it is not enforced client-side here.
+    _attribute_map = {
+        "daily": {"key": "daily", "type": "DailySchedule"},
+        "weekly": {"key": "weekly", "type": "WeeklySchedule"},
+        "absolute_monthly": {"key": "absoluteMonthly", "type": "AbsoluteMonthlySchedule"},
+        "relative_monthly": {"key": "relativeMonthly", "type": "RelativeMonthlySchedule"},
+    }
+
+    def __init__(
+        self,
+        *,
+        daily: Optional["_models.DailySchedule"] = None,
+        weekly: Optional["_models.WeeklySchedule"] = None,
+        absolute_monthly: Optional["_models.AbsoluteMonthlySchedule"] = None,
+        relative_monthly: Optional["_models.RelativeMonthlySchedule"] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword daily: For schedules like: 'recur every day' or 'recur every 3 days'.
+        :paramtype daily: ~azure.mgmt.containerservice.models.DailySchedule
+        :keyword weekly: For schedules like: 'recur every Monday' or 'recur every 3 weeks on
+         Wednesday'.
+        :paramtype weekly: ~azure.mgmt.containerservice.models.WeeklySchedule
+        :keyword absolute_monthly: For schedules like: 'recur every month on the 15th' or 'recur every
+         3 months on the 20th'.
+        :paramtype absolute_monthly: ~azure.mgmt.containerservice.models.AbsoluteMonthlySchedule
+        :keyword relative_monthly: For schedules like: 'recur every month on the first Monday' or
+         'recur every 3 months on last Friday'.
+        :paramtype relative_monthly: ~azure.mgmt.containerservice.models.RelativeMonthlySchedule
+        """
+        super().__init__(**kwargs)
+        self.daily = daily
+        self.weekly = weekly
+        self.absolute_monthly = absolute_monthly
+        self.relative_monthly = relative_monthly
+
+
+class ServiceMeshProfile(_serialization.Model):
+    """Service mesh profile for a managed cluster.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar mode: Mode of the service mesh. Required. Known values are: "Istio" and "Disabled".
+    :vartype mode: str or ~azure.mgmt.containerservice.models.ServiceMeshMode
+    :ivar istio: Istio service mesh configuration.
+    :vartype istio: ~azure.mgmt.containerservice.models.IstioServiceMesh
+    """
+
+    # Field constraints: the mesh mode must always be supplied.
+    _validation = {
+        "mode": {"required": True},
+    }
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    _attribute_map = {
+        "mode": {"key": "mode", "type": "str"},
+        "istio": {"key": "istio", "type": "IstioServiceMesh"},
+    }
+
+    def __init__(
+        self,
+        *,
+        mode: Union[str, "_models.ServiceMeshMode"],
+        istio: Optional["_models.IstioServiceMesh"] = None,
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword mode: Mode of the service mesh. Required. Known values are: "Istio" and "Disabled".
+        :paramtype mode: str or ~azure.mgmt.containerservice.models.ServiceMeshMode
+        :keyword istio: Istio service mesh configuration.
+        :paramtype istio: ~azure.mgmt.containerservice.models.IstioServiceMesh
+        """
+        super().__init__(**kwargs)
+        self.mode = mode
+        self.istio = istio
+
+
+class Snapshot(TrackedResource):
+    """A node pool snapshot resource.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    All required parameters must be populated in order to send to server.
+
+    :ivar id: Fully qualified resource ID for the resource. E.g.
+     "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}".
+    :vartype id: str
+    :ivar name: The name of the resource.
+    :vartype name: str
+    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+     "Microsoft.Storage/storageAccounts".
+    :vartype type: str
+    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+     information.
+    :vartype system_data: ~azure.mgmt.containerservice.models.SystemData
+    :ivar tags: Resource tags.
+    :vartype tags: dict[str, str]
+    :ivar location: The geo-location where the resource lives. Required.
+    :vartype location: str
+    :ivar creation_data: CreationData to be used to specify the source agent pool resource ID to
+     create this snapshot.
+    :vartype creation_data: ~azure.mgmt.containerservice.models.CreationData
+    :ivar snapshot_type: The type of a snapshot. The default is NodePool. "NodePool"
+    :vartype snapshot_type: str or ~azure.mgmt.containerservice.models.SnapshotType
+    :ivar kubernetes_version: The version of Kubernetes.
+    :vartype kubernetes_version: str
+    :ivar node_image_version: The version of node image.
+    :vartype node_image_version: str
+    :ivar os_type: The operating system type. The default is Linux. Known values are: "Linux" and
+     "Windows".
+    :vartype os_type: str or ~azure.mgmt.containerservice.models.OSType
+    :ivar os_sku: Specifies the OS SKU used by the agent pool. The default is Ubuntu if OSType is
+     Linux. The default is Windows2019 when Kubernetes <= 1.24 or Windows2022 when Kubernetes >=
+     1.25 if OSType is Windows. Known values are: "Ubuntu", "AzureLinux", "AzureLinux3",
+     "CBLMariner", "Windows2019", "Windows2022", "Ubuntu2204", and "Ubuntu2404".
+    :vartype os_sku: str or ~azure.mgmt.containerservice.models.OSSKU
+    :ivar vm_size: The size of the VM.
+    :vartype vm_size: str
+    :ivar enable_fips: Whether to use a FIPS-enabled OS.
+    :vartype enable_fips: bool
+    """
+
+    # Field constraints: ARM envelope fields and snapshot details are server-populated
+    # (readonly); only location is required on create.
+    _validation = {
+        "id": {"readonly": True},
+        "name": {"readonly": True},
+        "type": {"readonly": True},
+        "system_data": {"readonly": True},
+        "location": {"required": True},
+        "kubernetes_version": {"readonly": True},
+        "node_image_version": {"readonly": True},
+        "os_type": {"readonly": True},
+        "os_sku": {"readonly": True},
+        "vm_size": {"readonly": True},
+        "enable_fips": {"readonly": True},
+    }
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    # Dotted keys flatten the nested "properties" bag.
+    _attribute_map = {
+        "id": {"key": "id", "type": "str"},
+        "name": {"key": "name", "type": "str"},
+        "type": {"key": "type", "type": "str"},
+        "system_data": {"key": "systemData", "type": "SystemData"},
+        "tags": {"key": "tags", "type": "{str}"},
+        "location": {"key": "location", "type": "str"},
+        "creation_data": {"key": "properties.creationData", "type": "CreationData"},
+        "snapshot_type": {"key": "properties.snapshotType", "type": "str"},
+        "kubernetes_version": {"key": "properties.kubernetesVersion", "type": "str"},
+        "node_image_version": {"key": "properties.nodeImageVersion", "type": "str"},
+        "os_type": {"key": "properties.osType", "type": "str"},
+        "os_sku": {"key": "properties.osSku", "type": "str"},
+        "vm_size": {"key": "properties.vmSize", "type": "str"},
+        "enable_fips": {"key": "properties.enableFIPS", "type": "bool"},
+    }
+
+    def __init__(
+        self,
+        *,
+        location: str,
+        tags: Optional[dict[str, str]] = None,
+        creation_data: Optional["_models.CreationData"] = None,
+        snapshot_type: Union[str, "_models.SnapshotType"] = "NodePool",
+        **kwargs: Any
+    ) -> None:
+        """
+        :keyword tags: Resource tags.
+        :paramtype tags: dict[str, str]
+        :keyword location: The geo-location where the resource lives. Required.
+        :paramtype location: str
+        :keyword creation_data: CreationData to be used to specify the source agent pool resource ID to
+         create this snapshot.
+        :paramtype creation_data: ~azure.mgmt.containerservice.models.CreationData
+        :keyword snapshot_type: The type of a snapshot. The default is NodePool. "NodePool"
+        :paramtype snapshot_type: str or ~azure.mgmt.containerservice.models.SnapshotType
+        """
+        # ARM envelope fields (tags/location) are handled by the TrackedResource base.
+        super().__init__(tags=tags, location=location, **kwargs)
+        self.creation_data = creation_data
+        self.snapshot_type = snapshot_type
+        # Read-only, server-populated snapshot details start as None on the client.
+        self.kubernetes_version: Optional[str] = None
+        self.node_image_version: Optional[str] = None
+        self.os_type: Optional[Union[str, "_models.OSType"]] = None
+        self.os_sku: Optional[Union[str, "_models.OSSKU"]] = None
+        self.vm_size: Optional[str] = None
+        self.enable_fips: Optional[bool] = None
+
+
+class SnapshotListResult(_serialization.Model):
+    """The response from the List Snapshots operation.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar value: The list of snapshots.
+    :vartype value: list[~azure.mgmt.containerservice.models.Snapshot]
+    :ivar next_link: The URL to get the next set of snapshot results.
+    :vartype next_link: str
+    """
+
+    # Field constraints: the paging link is server-populated only.
+    _validation = {
+        "next_link": {"readonly": True},
+    }
+
+    # Attribute name -> {wire key, type string} metadata for JSON (de)serialization.
+    _attribute_map = {
+        "value": {"key": "value", "type": "[Snapshot]"},
+        "next_link": {"key": "nextLink", "type": "str"},
+    }
+
+    def __init__(self, *, value: Optional[list["_models.Snapshot"]] = None, **kwargs: Any) -> None:
+        """
+        :keyword value: The list of snapshots.
+        :paramtype value: list[~azure.mgmt.containerservice.models.Snapshot]
+        """
+        super().__init__(**kwargs)
+        self.value = value
+        # Read-only paging link; populated from server responses.
+        self.next_link: Optional[str] = None
+
+
+class SysctlConfig(_serialization.Model):
+ """Sysctl settings for Linux agent nodes.
+
+ :ivar net_core_somaxconn: Sysctl setting net.core.somaxconn.
+ :vartype net_core_somaxconn: int
+ :ivar net_core_netdev_max_backlog: Sysctl setting net.core.netdev_max_backlog.
+ :vartype net_core_netdev_max_backlog: int
+ :ivar net_core_rmem_default: Sysctl setting net.core.rmem_default.
+ :vartype net_core_rmem_default: int
+ :ivar net_core_rmem_max: Sysctl setting net.core.rmem_max.
+ :vartype net_core_rmem_max: int
+ :ivar net_core_wmem_default: Sysctl setting net.core.wmem_default.
+ :vartype net_core_wmem_default: int
+ :ivar net_core_wmem_max: Sysctl setting net.core.wmem_max.
+ :vartype net_core_wmem_max: int
+ :ivar net_core_optmem_max: Sysctl setting net.core.optmem_max.
+ :vartype net_core_optmem_max: int
+ :ivar net_ipv4_tcp_max_syn_backlog: Sysctl setting net.ipv4.tcp_max_syn_backlog.
+ :vartype net_ipv4_tcp_max_syn_backlog: int
+ :ivar net_ipv4_tcp_max_tw_buckets: Sysctl setting net.ipv4.tcp_max_tw_buckets.
+ :vartype net_ipv4_tcp_max_tw_buckets: int
+ :ivar net_ipv4_tcp_fin_timeout: Sysctl setting net.ipv4.tcp_fin_timeout.
+ :vartype net_ipv4_tcp_fin_timeout: int
+ :ivar net_ipv4_tcp_keepalive_time: Sysctl setting net.ipv4.tcp_keepalive_time.
+ :vartype net_ipv4_tcp_keepalive_time: int
+ :ivar net_ipv4_tcp_keepalive_probes: Sysctl setting net.ipv4.tcp_keepalive_probes.
+ :vartype net_ipv4_tcp_keepalive_probes: int
+ :ivar net_ipv4_tcpkeepalive_intvl: Sysctl setting net.ipv4.tcp_keepalive_intvl.
+ :vartype net_ipv4_tcpkeepalive_intvl: int
+ :ivar net_ipv4_tcp_tw_reuse: Sysctl setting net.ipv4.tcp_tw_reuse.
+ :vartype net_ipv4_tcp_tw_reuse: bool
+ :ivar net_ipv4_ip_local_port_range: Sysctl setting net.ipv4.ip_local_port_range.
+ :vartype net_ipv4_ip_local_port_range: str
+ :ivar net_ipv4_neigh_default_gc_thresh1: Sysctl setting net.ipv4.neigh.default.gc_thresh1.
+ :vartype net_ipv4_neigh_default_gc_thresh1: int
+ :ivar net_ipv4_neigh_default_gc_thresh2: Sysctl setting net.ipv4.neigh.default.gc_thresh2.
+ :vartype net_ipv4_neigh_default_gc_thresh2: int
+ :ivar net_ipv4_neigh_default_gc_thresh3: Sysctl setting net.ipv4.neigh.default.gc_thresh3.
+ :vartype net_ipv4_neigh_default_gc_thresh3: int
+ :ivar net_netfilter_nf_conntrack_max: Sysctl setting net.netfilter.nf_conntrack_max.
+ :vartype net_netfilter_nf_conntrack_max: int
+ :ivar net_netfilter_nf_conntrack_buckets: Sysctl setting net.netfilter.nf_conntrack_buckets.
+ :vartype net_netfilter_nf_conntrack_buckets: int
+ :ivar fs_inotify_max_user_watches: Sysctl setting fs.inotify.max_user_watches.
+ :vartype fs_inotify_max_user_watches: int
+ :ivar fs_file_max: Sysctl setting fs.file-max.
+ :vartype fs_file_max: int
+ :ivar fs_aio_max_nr: Sysctl setting fs.aio-max-nr.
+ :vartype fs_aio_max_nr: int
+ :ivar fs_nr_open: Sysctl setting fs.nr_open.
+ :vartype fs_nr_open: int
+ :ivar kernel_threads_max: Sysctl setting kernel.threads-max.
+ :vartype kernel_threads_max: int
+ :ivar vm_max_map_count: Sysctl setting vm.max_map_count.
+ :vartype vm_max_map_count: int
+ :ivar vm_swappiness: Sysctl setting vm.swappiness.
+ :vartype vm_swappiness: int
+ :ivar vm_vfs_cache_pressure: Sysctl setting vm.vfs_cache_pressure.
+ :vartype vm_vfs_cache_pressure: int
+ """
+
+ _validation = {
+ "net_ipv4_tcpkeepalive_intvl": {"maximum": 90, "minimum": 10},
+ "net_netfilter_nf_conntrack_max": {"maximum": 2097152, "minimum": 131072},
+ "net_netfilter_nf_conntrack_buckets": {"maximum": 524288, "minimum": 65536},
+ }
+
+ _attribute_map = {
+ "net_core_somaxconn": {"key": "netCoreSomaxconn", "type": "int"},
+ "net_core_netdev_max_backlog": {"key": "netCoreNetdevMaxBacklog", "type": "int"},
+ "net_core_rmem_default": {"key": "netCoreRmemDefault", "type": "int"},
+ "net_core_rmem_max": {"key": "netCoreRmemMax", "type": "int"},
+ "net_core_wmem_default": {"key": "netCoreWmemDefault", "type": "int"},
+ "net_core_wmem_max": {"key": "netCoreWmemMax", "type": "int"},
+ "net_core_optmem_max": {"key": "netCoreOptmemMax", "type": "int"},
+ "net_ipv4_tcp_max_syn_backlog": {"key": "netIpv4TcpMaxSynBacklog", "type": "int"},
+ "net_ipv4_tcp_max_tw_buckets": {"key": "netIpv4TcpMaxTwBuckets", "type": "int"},
+ "net_ipv4_tcp_fin_timeout": {"key": "netIpv4TcpFinTimeout", "type": "int"},
+ "net_ipv4_tcp_keepalive_time": {"key": "netIpv4TcpKeepaliveTime", "type": "int"},
+ "net_ipv4_tcp_keepalive_probes": {"key": "netIpv4TcpKeepaliveProbes", "type": "int"},
+ "net_ipv4_tcpkeepalive_intvl": {"key": "netIpv4TcpkeepaliveIntvl", "type": "int"},
+ "net_ipv4_tcp_tw_reuse": {"key": "netIpv4TcpTwReuse", "type": "bool"},
+ "net_ipv4_ip_local_port_range": {"key": "netIpv4IpLocalPortRange", "type": "str"},
+ "net_ipv4_neigh_default_gc_thresh1": {"key": "netIpv4NeighDefaultGcThresh1", "type": "int"},
+ "net_ipv4_neigh_default_gc_thresh2": {"key": "netIpv4NeighDefaultGcThresh2", "type": "int"},
+ "net_ipv4_neigh_default_gc_thresh3": {"key": "netIpv4NeighDefaultGcThresh3", "type": "int"},
+ "net_netfilter_nf_conntrack_max": {"key": "netNetfilterNfConntrackMax", "type": "int"},
+ "net_netfilter_nf_conntrack_buckets": {"key": "netNetfilterNfConntrackBuckets", "type": "int"},
+ "fs_inotify_max_user_watches": {"key": "fsInotifyMaxUserWatches", "type": "int"},
+ "fs_file_max": {"key": "fsFileMax", "type": "int"},
+ "fs_aio_max_nr": {"key": "fsAioMaxNr", "type": "int"},
+ "fs_nr_open": {"key": "fsNrOpen", "type": "int"},
+ "kernel_threads_max": {"key": "kernelThreadsMax", "type": "int"},
+ "vm_max_map_count": {"key": "vmMaxMapCount", "type": "int"},
+ "vm_swappiness": {"key": "vmSwappiness", "type": "int"},
+ "vm_vfs_cache_pressure": {"key": "vmVfsCachePressure", "type": "int"},
+ }
+
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ net_core_somaxconn: Optional[int] = None,
+ net_core_netdev_max_backlog: Optional[int] = None,
+ net_core_rmem_default: Optional[int] = None,
+ net_core_rmem_max: Optional[int] = None,
+ net_core_wmem_default: Optional[int] = None,
+ net_core_wmem_max: Optional[int] = None,
+ net_core_optmem_max: Optional[int] = None,
+ net_ipv4_tcp_max_syn_backlog: Optional[int] = None,
+ net_ipv4_tcp_max_tw_buckets: Optional[int] = None,
+ net_ipv4_tcp_fin_timeout: Optional[int] = None,
+ net_ipv4_tcp_keepalive_time: Optional[int] = None,
+ net_ipv4_tcp_keepalive_probes: Optional[int] = None,
+ net_ipv4_tcpkeepalive_intvl: Optional[int] = None,
+ net_ipv4_tcp_tw_reuse: Optional[bool] = None,
+ net_ipv4_ip_local_port_range: Optional[str] = None,
+ net_ipv4_neigh_default_gc_thresh1: Optional[int] = None,
+ net_ipv4_neigh_default_gc_thresh2: Optional[int] = None,
+ net_ipv4_neigh_default_gc_thresh3: Optional[int] = None,
+ net_netfilter_nf_conntrack_max: Optional[int] = None,
+ net_netfilter_nf_conntrack_buckets: Optional[int] = None,
+ fs_inotify_max_user_watches: Optional[int] = None,
+ fs_file_max: Optional[int] = None,
+ fs_aio_max_nr: Optional[int] = None,
+ fs_nr_open: Optional[int] = None,
+ kernel_threads_max: Optional[int] = None,
+ vm_max_map_count: Optional[int] = None,
+ vm_swappiness: Optional[int] = None,
+ vm_vfs_cache_pressure: Optional[int] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword net_core_somaxconn: Sysctl setting net.core.somaxconn.
+ :paramtype net_core_somaxconn: int
+ :keyword net_core_netdev_max_backlog: Sysctl setting net.core.netdev_max_backlog.
+ :paramtype net_core_netdev_max_backlog: int
+ :keyword net_core_rmem_default: Sysctl setting net.core.rmem_default.
+ :paramtype net_core_rmem_default: int
+ :keyword net_core_rmem_max: Sysctl setting net.core.rmem_max.
+ :paramtype net_core_rmem_max: int
+ :keyword net_core_wmem_default: Sysctl setting net.core.wmem_default.
+ :paramtype net_core_wmem_default: int
+ :keyword net_core_wmem_max: Sysctl setting net.core.wmem_max.
+ :paramtype net_core_wmem_max: int
+ :keyword net_core_optmem_max: Sysctl setting net.core.optmem_max.
+ :paramtype net_core_optmem_max: int
+ :keyword net_ipv4_tcp_max_syn_backlog: Sysctl setting net.ipv4.tcp_max_syn_backlog.
+ :paramtype net_ipv4_tcp_max_syn_backlog: int
+ :keyword net_ipv4_tcp_max_tw_buckets: Sysctl setting net.ipv4.tcp_max_tw_buckets.
+ :paramtype net_ipv4_tcp_max_tw_buckets: int
+ :keyword net_ipv4_tcp_fin_timeout: Sysctl setting net.ipv4.tcp_fin_timeout.
+ :paramtype net_ipv4_tcp_fin_timeout: int
+ :keyword net_ipv4_tcp_keepalive_time: Sysctl setting net.ipv4.tcp_keepalive_time.
+ :paramtype net_ipv4_tcp_keepalive_time: int
+ :keyword net_ipv4_tcp_keepalive_probes: Sysctl setting net.ipv4.tcp_keepalive_probes.
+ :paramtype net_ipv4_tcp_keepalive_probes: int
+ :keyword net_ipv4_tcpkeepalive_intvl: Sysctl setting net.ipv4.tcp_keepalive_intvl.
+ :paramtype net_ipv4_tcpkeepalive_intvl: int
+ :keyword net_ipv4_tcp_tw_reuse: Sysctl setting net.ipv4.tcp_tw_reuse.
+ :paramtype net_ipv4_tcp_tw_reuse: bool
+ :keyword net_ipv4_ip_local_port_range: Sysctl setting net.ipv4.ip_local_port_range.
+ :paramtype net_ipv4_ip_local_port_range: str
+ :keyword net_ipv4_neigh_default_gc_thresh1: Sysctl setting net.ipv4.neigh.default.gc_thresh1.
+ :paramtype net_ipv4_neigh_default_gc_thresh1: int
+ :keyword net_ipv4_neigh_default_gc_thresh2: Sysctl setting net.ipv4.neigh.default.gc_thresh2.
+ :paramtype net_ipv4_neigh_default_gc_thresh2: int
+ :keyword net_ipv4_neigh_default_gc_thresh3: Sysctl setting net.ipv4.neigh.default.gc_thresh3.
+ :paramtype net_ipv4_neigh_default_gc_thresh3: int
+ :keyword net_netfilter_nf_conntrack_max: Sysctl setting net.netfilter.nf_conntrack_max.
+ :paramtype net_netfilter_nf_conntrack_max: int
+ :keyword net_netfilter_nf_conntrack_buckets: Sysctl setting net.netfilter.nf_conntrack_buckets.
+ :paramtype net_netfilter_nf_conntrack_buckets: int
+ :keyword fs_inotify_max_user_watches: Sysctl setting fs.inotify.max_user_watches.
+ :paramtype fs_inotify_max_user_watches: int
+ :keyword fs_file_max: Sysctl setting fs.file-max.
+ :paramtype fs_file_max: int
+ :keyword fs_aio_max_nr: Sysctl setting fs.aio-max-nr.
+ :paramtype fs_aio_max_nr: int
+ :keyword fs_nr_open: Sysctl setting fs.nr_open.
+ :paramtype fs_nr_open: int
+ :keyword kernel_threads_max: Sysctl setting kernel.threads-max.
+ :paramtype kernel_threads_max: int
+ :keyword vm_max_map_count: Sysctl setting vm.max_map_count.
+ :paramtype vm_max_map_count: int
+ :keyword vm_swappiness: Sysctl setting vm.swappiness.
+ :paramtype vm_swappiness: int
+ :keyword vm_vfs_cache_pressure: Sysctl setting vm.vfs_cache_pressure.
+ :paramtype vm_vfs_cache_pressure: int
+ """
+ super().__init__(**kwargs)
+ self.net_core_somaxconn = net_core_somaxconn
+ self.net_core_netdev_max_backlog = net_core_netdev_max_backlog
+ self.net_core_rmem_default = net_core_rmem_default
+ self.net_core_rmem_max = net_core_rmem_max
+ self.net_core_wmem_default = net_core_wmem_default
+ self.net_core_wmem_max = net_core_wmem_max
+ self.net_core_optmem_max = net_core_optmem_max
+ self.net_ipv4_tcp_max_syn_backlog = net_ipv4_tcp_max_syn_backlog
+ self.net_ipv4_tcp_max_tw_buckets = net_ipv4_tcp_max_tw_buckets
+ self.net_ipv4_tcp_fin_timeout = net_ipv4_tcp_fin_timeout
+ self.net_ipv4_tcp_keepalive_time = net_ipv4_tcp_keepalive_time
+ self.net_ipv4_tcp_keepalive_probes = net_ipv4_tcp_keepalive_probes
+ self.net_ipv4_tcpkeepalive_intvl = net_ipv4_tcpkeepalive_intvl
+ self.net_ipv4_tcp_tw_reuse = net_ipv4_tcp_tw_reuse
+ self.net_ipv4_ip_local_port_range = net_ipv4_ip_local_port_range
+ self.net_ipv4_neigh_default_gc_thresh1 = net_ipv4_neigh_default_gc_thresh1
+ self.net_ipv4_neigh_default_gc_thresh2 = net_ipv4_neigh_default_gc_thresh2
+ self.net_ipv4_neigh_default_gc_thresh3 = net_ipv4_neigh_default_gc_thresh3
+ self.net_netfilter_nf_conntrack_max = net_netfilter_nf_conntrack_max
+ self.net_netfilter_nf_conntrack_buckets = net_netfilter_nf_conntrack_buckets
+ self.fs_inotify_max_user_watches = fs_inotify_max_user_watches
+ self.fs_file_max = fs_file_max
+ self.fs_aio_max_nr = fs_aio_max_nr
+ self.fs_nr_open = fs_nr_open
+ self.kernel_threads_max = kernel_threads_max
+ self.vm_max_map_count = vm_max_map_count
+ self.vm_swappiness = vm_swappiness
+ self.vm_vfs_cache_pressure = vm_vfs_cache_pressure
+
+
+class SystemData(_serialization.Model):
+ """Metadata pertaining to creation and last modification of the resource.
+
+ :ivar created_by: The identity that created the resource.
+ :vartype created_by: str
+ :ivar created_by_type: The type of identity that created the resource. Known values are:
+ "User", "Application", "ManagedIdentity", and "Key".
+ :vartype created_by_type: str or ~azure.mgmt.containerservice.models.CreatedByType
+ :ivar created_at: The timestamp of resource creation (UTC).
+ :vartype created_at: ~datetime.datetime
+ :ivar last_modified_by: The identity that last modified the resource.
+ :vartype last_modified_by: str
+ :ivar last_modified_by_type: The type of identity that last modified the resource. Known values
+ are: "User", "Application", "ManagedIdentity", and "Key".
+ :vartype last_modified_by_type: str or ~azure.mgmt.containerservice.models.CreatedByType
+ :ivar last_modified_at: The timestamp of resource last modification (UTC).
+ :vartype last_modified_at: ~datetime.datetime
+ """
+
+ _attribute_map = {
+ "created_by": {"key": "createdBy", "type": "str"},
+ "created_by_type": {"key": "createdByType", "type": "str"},
+ "created_at": {"key": "createdAt", "type": "iso-8601"},
+ "last_modified_by": {"key": "lastModifiedBy", "type": "str"},
+ "last_modified_by_type": {"key": "lastModifiedByType", "type": "str"},
+ "last_modified_at": {"key": "lastModifiedAt", "type": "iso-8601"},
+ }
+
+ def __init__(
+ self,
+ *,
+ created_by: Optional[str] = None,
+ created_by_type: Optional[Union[str, "_models.CreatedByType"]] = None,
+ created_at: Optional[datetime.datetime] = None,
+ last_modified_by: Optional[str] = None,
+ last_modified_by_type: Optional[Union[str, "_models.CreatedByType"]] = None,
+ last_modified_at: Optional[datetime.datetime] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword created_by: The identity that created the resource.
+ :paramtype created_by: str
+ :keyword created_by_type: The type of identity that created the resource. Known values are:
+ "User", "Application", "ManagedIdentity", and "Key".
+ :paramtype created_by_type: str or ~azure.mgmt.containerservice.models.CreatedByType
+ :keyword created_at: The timestamp of resource creation (UTC).
+ :paramtype created_at: ~datetime.datetime
+ :keyword last_modified_by: The identity that last modified the resource.
+ :paramtype last_modified_by: str
+ :keyword last_modified_by_type: The type of identity that last modified the resource. Known
+ values are: "User", "Application", "ManagedIdentity", and "Key".
+ :paramtype last_modified_by_type: str or ~azure.mgmt.containerservice.models.CreatedByType
+ :keyword last_modified_at: The timestamp of resource last modification (UTC).
+ :paramtype last_modified_at: ~datetime.datetime
+ """
+ super().__init__(**kwargs)
+ self.created_by = created_by
+ self.created_by_type = created_by_type
+ self.created_at = created_at
+ self.last_modified_by = last_modified_by
+ self.last_modified_by_type = last_modified_by_type
+ self.last_modified_at = last_modified_at
+
+
+class TagsObject(_serialization.Model):
+ """Tags object for patch operations.
+
+ :ivar tags: Resource tags.
+ :vartype tags: dict[str, str]
+ """
+
+ _attribute_map = {
+ "tags": {"key": "tags", "type": "{str}"},
+ }
+
+ def __init__(self, *, tags: Optional[dict[str, str]] = None, **kwargs: Any) -> None:
+ """
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ """
+ super().__init__(**kwargs)
+ self.tags = tags
+
+
+class TimeInWeek(_serialization.Model):
+ """Time in a week.
+
+ :ivar day: The day of the week. Known values are: "Sunday", "Monday", "Tuesday", "Wednesday",
+ "Thursday", "Friday", and "Saturday".
+ :vartype day: str or ~azure.mgmt.containerservice.models.WeekDay
+ :ivar hour_slots: A list of hours in the day used to identify a time range. Each integer hour
+ represents a time range beginning at 0m after the hour ending at the next hour (non-inclusive).
+ 0 corresponds to 00:00 UTC, 23 corresponds to 23:00 UTC. Specifying [0, 1] means the 00:00 -
+ 02:00 UTC time range.
+ :vartype hour_slots: list[int]
+ """
+
+ _attribute_map = {
+ "day": {"key": "day", "type": "str"},
+ "hour_slots": {"key": "hourSlots", "type": "[int]"},
+ }
+
+ def __init__(
+ self,
+ *,
+ day: Optional[Union[str, "_models.WeekDay"]] = None,
+ hour_slots: Optional[list[int]] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword day: The day of the week. Known values are: "Sunday", "Monday", "Tuesday",
+ "Wednesday", "Thursday", "Friday", and "Saturday".
+ :paramtype day: str or ~azure.mgmt.containerservice.models.WeekDay
+ :keyword hour_slots: A list of hours in the day used to identify a time range. Each integer
+ hour represents a time range beginning at 0m after the hour ending at the next hour
+ (non-inclusive). 0 corresponds to 00:00 UTC, 23 corresponds to 23:00 UTC. Specifying [0, 1]
+ means the 00:00 - 02:00 UTC time range.
+ :paramtype hour_slots: list[int]
+ """
+ super().__init__(**kwargs)
+ self.day = day
+ self.hour_slots = hour_slots
+
+
+class TimeSpan(_serialization.Model):
+ """A time range. For example, between 2021-05-25T13:00:00Z and 2021-05-25T14:00:00Z.
+
+ :ivar start: The start of a time span.
+ :vartype start: ~datetime.datetime
+ :ivar end: The end of a time span.
+ :vartype end: ~datetime.datetime
+ """
+
+ _attribute_map = {
+ "start": {"key": "start", "type": "iso-8601"},
+ "end": {"key": "end", "type": "iso-8601"},
+ }
+
+ def __init__(
+ self, *, start: Optional[datetime.datetime] = None, end: Optional[datetime.datetime] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword start: The start of a time span.
+ :paramtype start: ~datetime.datetime
+ :keyword end: The end of a time span.
+ :paramtype end: ~datetime.datetime
+ """
+ super().__init__(**kwargs)
+ self.start = start
+ self.end = end
+
+
+class TrustedAccessRole(_serialization.Model):
+ """Trusted access role definition.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar source_resource_type: Resource type of Azure resource.
+ :vartype source_resource_type: str
+ :ivar name: Name of role, name is unique under a source resource type.
+ :vartype name: str
+ :ivar rules: List of rules for the role. This maps to 'rules' property of `Kubernetes Cluster
+ Role
+ <https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole>`_.
+ :vartype rules: list[~azure.mgmt.containerservice.models.TrustedAccessRoleRule]
+ """
+
+ _validation = {
+ "source_resource_type": {"readonly": True},
+ "name": {"readonly": True},
+ "rules": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "source_resource_type": {"key": "sourceResourceType", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "rules": {"key": "rules", "type": "[TrustedAccessRoleRule]"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.source_resource_type: Optional[str] = None
+ self.name: Optional[str] = None
+ self.rules: Optional[list["_models.TrustedAccessRoleRule"]] = None
+
+
+class TrustedAccessRoleBinding(Resource):
+ """Defines binding between a resource and role.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar id: Fully qualified resource ID for the resource. E.g.
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}".
+ :vartype id: str
+ :ivar name: The name of the resource.
+ :vartype name: str
+ :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
+ "Microsoft.Storage/storageAccounts".
+ :vartype type: str
+ :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
+ information.
+ :vartype system_data: ~azure.mgmt.containerservice.models.SystemData
+ :ivar provisioning_state: The current provisioning state of trusted access role binding. Known
+ values are: "Canceled", "Deleting", "Failed", "Succeeded", and "Updating".
+ :vartype provisioning_state: str or
+ ~azure.mgmt.containerservice.models.TrustedAccessRoleBindingProvisioningState
+ :ivar source_resource_id: The ARM resource ID of source resource that trusted access is
+ configured for. Required.
+ :vartype source_resource_id: str
+ :ivar roles: A list of roles to bind, each item is a resource type qualified role name. For
+ example: 'Microsoft.MachineLearningServices/workspaces/reader'. Required.
+ :vartype roles: list[str]
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "system_data": {"readonly": True},
+ "provisioning_state": {"readonly": True},
+ "source_resource_id": {"required": True},
+ "roles": {"required": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "system_data": {"key": "systemData", "type": "SystemData"},
+ "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
+ "source_resource_id": {"key": "properties.sourceResourceId", "type": "str"},
+ "roles": {"key": "properties.roles", "type": "[str]"},
+ }
+
+ def __init__(self, *, source_resource_id: str, roles: list[str], **kwargs: Any) -> None:
+ """
+ :keyword source_resource_id: The ARM resource ID of source resource that trusted access is
+ configured for. Required.
+ :paramtype source_resource_id: str
+ :keyword roles: A list of roles to bind, each item is a resource type qualified role name. For
+ example: 'Microsoft.MachineLearningServices/workspaces/reader'. Required.
+ :paramtype roles: list[str]
+ """
+ super().__init__(**kwargs)
+ self.provisioning_state: Optional[Union[str, "_models.TrustedAccessRoleBindingProvisioningState"]] = None
+ self.source_resource_id = source_resource_id
+ self.roles = roles
+
+
+class TrustedAccessRoleBindingListResult(_serialization.Model):
+ """List of trusted access role bindings.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar value: Role binding list.
+ :vartype value: list[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding]
+ :ivar next_link: Link to next page of resources.
+ :vartype next_link: str
+ """
+
+ _validation = {
+ "next_link": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "value": {"key": "value", "type": "[TrustedAccessRoleBinding]"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ }
+
+ def __init__(self, *, value: Optional[list["_models.TrustedAccessRoleBinding"]] = None, **kwargs: Any) -> None:
+ """
+ :keyword value: Role binding list.
+ :paramtype value: list[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding]
+ """
+ super().__init__(**kwargs)
+ self.value = value
+ self.next_link: Optional[str] = None
+
+
+class TrustedAccessRoleListResult(_serialization.Model):
+ """List of trusted access roles.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar value: Role list.
+ :vartype value: list[~azure.mgmt.containerservice.models.TrustedAccessRole]
+ :ivar next_link: Link to next page of resources.
+ :vartype next_link: str
+ """
+
+ _validation = {
+ "value": {"readonly": True},
+ "next_link": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "value": {"key": "value", "type": "[TrustedAccessRole]"},
+ "next_link": {"key": "nextLink", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.value: Optional[list["_models.TrustedAccessRole"]] = None
+ self.next_link: Optional[str] = None
+
+
+class TrustedAccessRoleRule(_serialization.Model):
+ """Rule for trusted access role.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar verbs: List of allowed verbs.
+ :vartype verbs: list[str]
+ :ivar api_groups: List of allowed apiGroups.
+ :vartype api_groups: list[str]
+ :ivar resources: List of allowed resources.
+ :vartype resources: list[str]
+ :ivar resource_names: List of allowed names.
+ :vartype resource_names: list[str]
+ :ivar non_resource_ur_ls: List of allowed nonResourceURLs.
+ :vartype non_resource_ur_ls: list[str]
+ """
+
+ _validation = {
+ "verbs": {"readonly": True},
+ "api_groups": {"readonly": True},
+ "resources": {"readonly": True},
+ "resource_names": {"readonly": True},
+ "non_resource_ur_ls": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "verbs": {"key": "verbs", "type": "[str]"},
+ "api_groups": {"key": "apiGroups", "type": "[str]"},
+ "resources": {"key": "resources", "type": "[str]"},
+ "resource_names": {"key": "resourceNames", "type": "[str]"},
+ "non_resource_ur_ls": {"key": "nonResourceURLs", "type": "[str]"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.verbs: Optional[list[str]] = None
+ self.api_groups: Optional[list[str]] = None
+ self.resources: Optional[list[str]] = None
+ self.resource_names: Optional[list[str]] = None
+ self.non_resource_ur_ls: Optional[list[str]] = None
+
+
+class UpgradeOverrideSettings(_serialization.Model):
+ """Settings for overrides when upgrading a cluster.
+
+ :ivar force_upgrade: Whether to force upgrade the cluster. Note that this option instructs
+ upgrade operation to bypass upgrade protections such as checking for deprecated API usage.
+ Enable this option only with caution.
+ :vartype force_upgrade: bool
+ :ivar until: Until when the overrides are effective. Note that this only matches the start time
+ of an upgrade, and the effectiveness won't change once an upgrade starts even if the ``until``
+ expires as upgrade proceeds. This field is not set by default. It must be set for the overrides
+ to take effect.
+ :vartype until: ~datetime.datetime
+ """
+
+ _attribute_map = {
+ "force_upgrade": {"key": "forceUpgrade", "type": "bool"},
+ "until": {"key": "until", "type": "iso-8601"},
+ }
+
+ def __init__(
+ self, *, force_upgrade: Optional[bool] = None, until: Optional[datetime.datetime] = None, **kwargs: Any
+ ) -> None:
+ """
+ :keyword force_upgrade: Whether to force upgrade the cluster. Note that this option instructs
+ upgrade operation to bypass upgrade protections such as checking for deprecated API usage.
+ Enable this option only with caution.
+ :paramtype force_upgrade: bool
+ :keyword until: Until when the overrides are effective. Note that this only matches the start
+ time of an upgrade, and the effectiveness won't change once an upgrade starts even if the
+ ``until`` expires as upgrade proceeds. This field is not set by default. It must be set for the
+ overrides to take effect.
+ :paramtype until: ~datetime.datetime
+ """
+ super().__init__(**kwargs)
+ self.force_upgrade = force_upgrade
+ self.until = until
+
+
+class VirtualMachineNodes(_serialization.Model):
+ """Current status on a group of nodes of the same vm size.
+
+ :ivar size: The VM size of the agents used to host this group of nodes.
+ :vartype size: str
+ :ivar count: Number of nodes.
+ :vartype count: int
+ """
+
+ _attribute_map = {
+ "size": {"key": "size", "type": "str"},
+ "count": {"key": "count", "type": "int"},
+ }
+
+ def __init__(self, *, size: Optional[str] = None, count: Optional[int] = None, **kwargs: Any) -> None:
+ """
+ :keyword size: The VM size of the agents used to host this group of nodes.
+ :paramtype size: str
+ :keyword count: Number of nodes.
+ :paramtype count: int
+ """
+ super().__init__(**kwargs)
+ self.size = size
+ self.count = count
+
+
+class VirtualMachinesProfile(_serialization.Model):
+ """Specifications on VirtualMachines agent pool.
+
+ :ivar scale: Specifications on how to scale a VirtualMachines agent pool.
+ :vartype scale: ~azure.mgmt.containerservice.models.ScaleProfile
+ """
+
+ _attribute_map = {
+ "scale": {"key": "scale", "type": "ScaleProfile"},
+ }
+
+ def __init__(self, *, scale: Optional["_models.ScaleProfile"] = None, **kwargs: Any) -> None:
+ """
+ :keyword scale: Specifications on how to scale a VirtualMachines agent pool.
+ :paramtype scale: ~azure.mgmt.containerservice.models.ScaleProfile
+ """
+ super().__init__(**kwargs)
+ self.scale = scale
+
+
+class WeeklySchedule(_serialization.Model):
+ """For schedules like: 'recur every Monday' or 'recur every 3 weeks on Wednesday'.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar interval_weeks: Specifies the number of weeks between each set of occurrences. Required.
+ :vartype interval_weeks: int
+ :ivar day_of_week: Specifies on which day of the week the maintenance occurs. Required. Known
+ values are: "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", and "Saturday".
+ :vartype day_of_week: str or ~azure.mgmt.containerservice.models.WeekDay
+ """
+
+ _validation = {
+ "interval_weeks": {"required": True, "maximum": 4, "minimum": 1},
+ "day_of_week": {"required": True},
+ }
+
+ _attribute_map = {
+ "interval_weeks": {"key": "intervalWeeks", "type": "int"},
+ "day_of_week": {"key": "dayOfWeek", "type": "str"},
+ }
+
+ def __init__(self, *, interval_weeks: int, day_of_week: Union[str, "_models.WeekDay"], **kwargs: Any) -> None:
+ """
+ :keyword interval_weeks: Specifies the number of weeks between each set of occurrences.
+ Required.
+ :paramtype interval_weeks: int
+ :keyword day_of_week: Specifies on which day of the week the maintenance occurs. Required.
+ Known values are: "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", and
+ "Saturday".
+ :paramtype day_of_week: str or ~azure.mgmt.containerservice.models.WeekDay
+ """
+ super().__init__(**kwargs)
+ self.interval_weeks = interval_weeks
+ self.day_of_week = day_of_week
+
+
+class WindowsGmsaProfile(_serialization.Model):
+ """Windows gMSA Profile in the managed cluster.
+
+ :ivar enabled: Whether to enable Windows gMSA. Specifies whether to enable Windows gMSA in the
+ managed cluster.
+ :vartype enabled: bool
+ :ivar dns_server: Specifies the DNS server for Windows gMSA. :code:`<br>`\\
+ :code:`<br>`
+ Set it
+ to empty if you have configured the DNS server in the vnet which is used to create the managed
+ cluster.
+ :vartype dns_server: str
+ :ivar root_domain_name: Specifies the root domain name for Windows gMSA.
+ :code:`<br>`\\
+ :code:`<br>`
+ Set it to empty if you have configured the DNS server in the vnet which is used to
+ create the managed cluster.
+ :vartype root_domain_name: str
+ """
+
+ _attribute_map = {
+ "enabled": {"key": "enabled", "type": "bool"},
+ "dns_server": {"key": "dnsServer", "type": "str"},
+ "root_domain_name": {"key": "rootDomainName", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ enabled: Optional[bool] = None,
+ dns_server: Optional[str] = None,
+ root_domain_name: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword enabled: Whether to enable Windows gMSA. Specifies whether to enable Windows gMSA in
+ the managed cluster.
+ :paramtype enabled: bool
+ :keyword dns_server: Specifies the DNS server for Windows gMSA.
+ :code:`<br>`\\
+ :code:`<br>` Set
+ it to empty if you have configured the DNS server in the vnet which is used to create the
+ managed cluster.
+ :paramtype dns_server: str
+ :keyword root_domain_name: Specifies the root domain name for Windows gMSA.
+ :code:`<br>`\\
+ :code:`<br>`
+ Set it to empty if you have configured the DNS server in the vnet which is used to
+ create the managed cluster.
+ :paramtype root_domain_name: str
+ """
+ super().__init__(**kwargs)
+ self.enabled = enabled
+ self.dns_server = dns_server
+ self.root_domain_name = root_domain_name
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/models/_patch.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/models/_patch.py
new file mode 100644
index 00000000000..8bcb627aa47
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/models/_patch.py
@@ -0,0 +1,21 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+ """Do not remove from this file.
+
+ `patch_sdk` is a last resort escape hatch that allows you to do customizations
+ you can't accomplish using the techniques described in
+ https://aka.ms/azsdk/python/dpcodegen/python/customize
+ """
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/__init__.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/__init__.py
new file mode 100644
index 00000000000..70b2a677e7b
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/__init__.py
@@ -0,0 +1,47 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from ._patch import * # pylint: disable=unused-wildcard-import
+
+from ._operations import Operations # type: ignore
+from ._managed_clusters_operations import ManagedClustersOperations # type: ignore
+from ._maintenance_configurations_operations import MaintenanceConfigurationsOperations # type: ignore
+from ._managed_namespaces_operations import ManagedNamespacesOperations # type: ignore
+from ._agent_pools_operations import AgentPoolsOperations # type: ignore
+from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations # type: ignore
+from ._private_link_resources_operations import PrivateLinkResourcesOperations # type: ignore
+from ._resolve_private_link_service_id_operations import ResolvePrivateLinkServiceIdOperations # type: ignore
+from ._snapshots_operations import SnapshotsOperations # type: ignore
+from ._trusted_access_role_bindings_operations import TrustedAccessRoleBindingsOperations # type: ignore
+from ._trusted_access_roles_operations import TrustedAccessRolesOperations # type: ignore
+from ._machines_operations import MachinesOperations # type: ignore
+
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+ "Operations",
+ "ManagedClustersOperations",
+ "MaintenanceConfigurationsOperations",
+ "ManagedNamespacesOperations",
+ "AgentPoolsOperations",
+ "PrivateEndpointConnectionsOperations",
+ "PrivateLinkResourcesOperations",
+ "ResolvePrivateLinkServiceIdOperations",
+ "SnapshotsOperations",
+ "TrustedAccessRoleBindingsOperations",
+ "TrustedAccessRolesOperations",
+ "MachinesOperations",
+]
+__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore
+_patch_sdk()
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_agent_pools_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_agent_pools_operations.py
new file mode 100644
index 00000000000..203a6ca90b7
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_agent_pools_operations.py
@@ -0,0 +1,1612 @@
+# pylint: disable=line-too-long,useless-suppression,too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from io import IOBase
+from typing import Any, Callable, IO, Iterator, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._configuration import ContainerServiceClientConfiguration
+from .._utils.serialization import Deserializer, Serializer
+
+T = TypeVar("T")
+# Shape of the optional ``cls`` response-transform callback every operation
+# accepts: (pipeline_response, deserialized, response_headers) -> Any.
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]]
+# Generator artifact: aliases the builtin under the legacy typing name.
+List = list
+
+# Module-level serializer shared by the request builders below.  Client-side
+# validation is disabled so parameter constraints are enforced by the service,
+# not raised locally.
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+def build_abort_latest_operation_request(
+    resource_group_name: str, resource_name: str, agent_pool_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build the POST request that aborts the latest running operation on an agent pool."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    # NOTE(review): lowercase "managedclusters" is as generated for this route;
+    # do not normalize the casing without checking the REST spec.
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclusters/{resourceName}/agentPools/{agentPoolName}/abort",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+        "agentPoolName": _SERIALIZER.url(
+            "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$"
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_request(
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build the GET request that lists all agent pools of a managed cluster."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(
+    resource_group_name: str, resource_name: str, agent_pool_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build the GET request for a single agent pool of a managed cluster."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+        "agentPoolName": _SERIALIZER.url(
+            "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$"
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+    resource_group_name: str,
+    resource_name: str,
+    agent_pool_name: str,
+    subscription_id: str,
+    *,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    """Build the PUT request that creates or updates an agent pool.
+
+    The optional ``if_match``/``if_none_match`` ETags enable optimistic
+    concurrency; the JSON body is supplied by the caller via ``json=``/``content=``
+    in ``kwargs``.
+    """
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+        "agentPoolName": _SERIALIZER.url(
+            "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$"
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+    resource_group_name: str,
+    resource_name: str,
+    agent_pool_name: str,
+    subscription_id: str,
+    *,
+    ignore_pod_disruption_budget: Optional[bool] = None,
+    if_match: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    """Build the DELETE request for an agent pool.
+
+    ``ignore_pod_disruption_budget`` is forwarded as a query parameter only when
+    set; ``if_match`` adds conditional-delete semantics via the If-Match header.
+    """
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+        "agentPoolName": _SERIALIZER.url(
+            "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$"
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+    if ignore_pod_disruption_budget is not None:
+        _params["ignore-pod-disruption-budget"] = _SERIALIZER.query(
+            "ignore_pod_disruption_budget", ignore_pod_disruption_budget, "bool"
+        )
+
+    # Construct headers
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_upgrade_profile_request(
+    resource_group_name: str, resource_name: str, agent_pool_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build the GET request for the agent pool's default upgrade profile."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeProfiles/default",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+        "agentPoolName": _SERIALIZER.url(
+            "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$"
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_machines_request(
+    resource_group_name: str, resource_name: str, agent_pool_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build the POST request that deletes specific machines from an agent pool.
+
+    The machine list travels in the JSON body supplied by the caller through
+    ``kwargs`` (``json=``/``content=``).
+    """
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/deleteMachines",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+        "agentPoolName": _SERIALIZER.url(
+            "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$"
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_available_agent_pool_versions_request(  # pylint: disable=name-too-long
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build the GET request for the cluster-level available agent pool versions."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/availableAgentPoolVersions",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_upgrade_node_image_version_request(
+    resource_group_name: str, resource_name: str, agent_pool_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build the POST request that upgrades the node image version of an agent pool."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+        "agentPoolName": _SERIALIZER.url(
+            "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$"
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class AgentPoolsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s
+ :attr:`agent_pools` attribute.
+ """
+
+ models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        # The generated client passes (client, config, serializer, deserializer)
+        # positionally; the keyword fallback supports explicit construction.
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    def _abort_latest_operation_initial(
+        self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
+    ) -> Iterator[bytes]:
+        """Send the initial abort POST and return the streamed response body.
+
+        Used by :meth:`begin_abort_latest_operation` as the first leg of the LRO;
+        polling state is carried in the 202 response headers.
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+        _request = build_abort_latest_operation_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            agent_pool_name=agent_pool_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        # Stream the response so the poller can decide how to consume the body.
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202, 204]:
+            try:
+                response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        # 202 carries the polling URLs consumed by ARMPolling.
+        response_headers = {}
+        if response.status_code == 202:
+            response_headers["location"] = self._deserialize("str", response.headers.get("location"))
+            response_headers["Azure-AsyncOperation"] = self._deserialize(
+                "str", response.headers.get("Azure-AsyncOperation")
+            )
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def begin_abort_latest_operation(
+        self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
+    ) -> LROPoller[None]:
+        """Aborts last operation running on agent pool.
+
+        Aborts the currently running operation on the agent pool. The Agent Pool will be moved to a
+        Canceling state and eventually to a Canceled state when cancellation finishes. If the operation
+        completes before cancellation can take place, a 409 error code is returned.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param agent_pool_name: The name of the agent pool. Required.
+        :type agent_pool_name: str
+        :return: An instance of LROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.LROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+        if cont_token is None:
+            raw_result = self._abort_latest_operation_initial(
+                resource_group_name=resource_group_name,
+                resource_name=resource_name,
+                agent_pool_name=agent_pool_name,
+                api_version=api_version,
+                cls=lambda x, y, z: x,
+                headers=_headers,
+                params=_params,
+                **kwargs
+            )
+            # Drain the streamed initial response before handing it to the poller.
+            raw_result.http_response.read()  # type: ignore
+        kwargs.pop("error_map", None)
+
+        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
+            # The abort LRO yields no body; only the caller's cls hook sees the response.
+            if cls:
+                return cls(pipeline_response, None, {})  # type: ignore
+
+        if polling is True:
+            # Final state is taken from the "location" header per the generated lro_options.
+            polling_method: PollingMethod = cast(
+                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+            )
+        elif polling is False:
+            polling_method = cast(PollingMethod, NoPolling())
+        else:
+            polling_method = polling
+        if cont_token:
+            return LROPoller[None].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
+
+    @distributed_trace
+    def list(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> ItemPaged["_models.AgentPool"]:
+        """Gets a list of agent pools in the specified managed cluster.
+
+        Gets a list of agent pools in the specified managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :return: An iterator like instance of either AgentPool or the result of cls(response)
+        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.AgentPool]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.AgentPoolListResult] = kwargs.pop("cls", None)
+
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        def prepare_request(next_link=None):
+            # First page uses the generated builder; subsequent pages replay the
+            # service-provided next_link with this client's api-version re-applied.
+            if not next_link:
+
+                _request = build_list_request(
+                    resource_group_name=resource_group_name,
+                    resource_name=resource_name,
+                    subscription_id=self._config.subscription_id,
+                    api_version=api_version,
+                    headers=_headers,
+                    params=_params,
+                )
+                _request.url = self._client.format_url(_request.url)
+
+            else:
+                # make call to next link with the client's api-version
+                _parsed_next_link = urllib.parse.urlparse(next_link)
+                _next_request_params = case_insensitive_dict(
+                    {
+                        key: [urllib.parse.quote(v) for v in value]
+                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+                    }
+                )
+                _next_request_params["api-version"] = self._config.api_version
+                _request = HttpRequest(
+                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+                )
+                _request.url = self._client.format_url(_request.url)
+                # Generator artifact: method is already "GET" from the constructor above.
+                _request.method = "GET"
+            return _request
+
+        def extract_data(pipeline_response):
+            # Returns (continuation token, page items) as ItemPaged expects.
+            deserialized = self._deserialize("AgentPoolListResult", pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)  # type: ignore
+            return deserialized.next_link or None, iter(list_of_elem)
+
+        def get_next(next_link=None):
+            # Fetch one page; non-200 is mapped to the ARM error model and raised.
+            _request = prepare_request(next_link)
+
+            _stream = False
+            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+                _request, stream=_stream, **kwargs
+            )
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                error = self._deserialize.failsafe_deserialize(
+                    _models.ErrorResponse,
+                    pipeline_response,
+                )
+                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        return ItemPaged(get_next, extract_data)
+
+    @distributed_trace
+    def get(
+        self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
+    ) -> _models.AgentPool:
+        """Gets the specified managed cluster agent pool.
+
+        Gets the specified managed cluster agent pool.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param agent_pool_name: The name of the agent pool. Required.
+        :type agent_pool_name: str
+        :return: AgentPool or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.AgentPool
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None)
+
+        _request = build_get_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            agent_pool_name=agent_pool_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("AgentPool", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    def _create_or_update_initial(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        agent_pool_name: str,
+        parameters: Union[_models.AgentPool, IO[bytes]],
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
+        **kwargs: Any
+    ) -> Iterator[bytes]:
+        """Send the initial PUT for create-or-update and return the streamed body.
+
+        First leg of :meth:`begin_create_or_update`; ``parameters`` may be a model
+        (serialized to JSON) or a pre-encoded binary stream (sent as-is).
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+        content_type = content_type or "application/json"
+        _json = None
+        _content = None
+        # Binary/stream input goes through untouched; models are serialized.
+        if isinstance(parameters, (IOBase, bytes)):
+            _content = parameters
+        else:
+            _json = self._serialize.body(parameters, "AgentPool")
+
+        _request = build_create_or_update_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            agent_pool_name=agent_pool_name,
+            subscription_id=self._config.subscription_id,
+            if_match=if_match,
+            if_none_match=if_none_match,
+            api_version=api_version,
+            content_type=content_type,
+            json=_json,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 201]:
+            try:
+                response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    # Typing-only overload: typed AgentPool model body. The runtime
+    # implementation follows the second overload.
+    @overload
+    def begin_create_or_update(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        agent_pool_name: str,
+        parameters: _models.AgentPool,
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> LROPoller[_models.AgentPool]:
+        """Creates or updates an agent pool in the specified managed cluster.
+
+        Creates or updates an agent pool in the specified managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param agent_pool_name: The name of the agent pool. Required.
+        :type agent_pool_name: str
+        :param parameters: The agent pool to create or update. Required.
+        :type parameters: ~azure.mgmt.containerservice.models.AgentPool
+        :param if_match: The request should only proceed if an entity matches this string. Default
+         value is None.
+        :type if_match: str
+        :param if_none_match: The request should only proceed if no entity matches this string. Default
+         value is None.
+        :type if_none_match: str
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: An instance of LROPoller that returns either AgentPool or the result of cls(response)
+        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.AgentPool]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ agent_pool_name: str,
+ parameters: IO[bytes],
+ if_match: Optional[str] = None,
+ if_none_match: Optional[str] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.AgentPool]:
+ """Creates or updates an agent pool in the specified managed cluster.
+
+ Creates or updates an agent pool in the specified managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param agent_pool_name: The name of the agent pool. Required.
+ :type agent_pool_name: str
+ :param parameters: The agent pool to create or update. Required.
+ :type parameters: IO[bytes]
+ :param if_match: The request should only proceed if an entity matches this string. Default
+ value is None.
+ :type if_match: str
+ :param if_none_match: The request should only proceed if no entity matches this string. Default
+ value is None.
+ :type if_none_match: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either AgentPool or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.AgentPool]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ agent_pool_name: str,
+ parameters: Union[_models.AgentPool, IO[bytes]],
+ if_match: Optional[str] = None,
+ if_none_match: Optional[str] = None,
+ **kwargs: Any
+ ) -> LROPoller[_models.AgentPool]:
+ """Creates or updates an agent pool in the specified managed cluster.
+
+ Creates or updates an agent pool in the specified managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param agent_pool_name: The name of the agent pool. Required.
+ :type agent_pool_name: str
+ :param parameters: The agent pool to create or update. Is either a AgentPool type or a
+ IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.AgentPool or IO[bytes]
+ :param if_match: The request should only proceed if an entity matches this string. Default
+ value is None.
+ :type if_match: str
+ :param if_none_match: The request should only proceed if no entity matches this string. Default
+ value is None.
+ :type if_none_match: str
+ :return: An instance of LROPoller that returns either AgentPool or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.AgentPool]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ agent_pool_name=agent_pool_name,
+ parameters=parameters,
+ if_match=if_match,
+ if_none_match=if_none_match,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("AgentPool", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ if polling is True:
+ polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[_models.AgentPool].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[_models.AgentPool](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
+
    def _delete_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        agent_pool_name: str,
        ignore_pod_disruption_budget: Optional[bool] = None,
        if_match: Optional[str] = None,
        **kwargs: Any
    ) -> Iterator[bytes]:
        """Issue the initial DELETE request for the agent-pool LRO and return the raw
        streamed response body; ``begin_delete`` drives the subsequent polling."""
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)

        _request = build_delete_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            agent_pool_name=agent_pool_name,
            subscription_id=self._config.subscription_id,
            ignore_pod_disruption_budget=ignore_pod_disruption_budget,
            if_match=if_match,
            api_version=api_version,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _decompress = kwargs.pop("decompress", True)
        _stream = True
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [202, 204]:
            try:
                response.read()  # Load the body in memory and close the socket
            except (StreamConsumedError, StreamClosedError):
                pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        response_headers = {}
        # 202 Accepted carries the polling URL in the Location header; 204 means already gone.
        if response.status_code == 202:
            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))

        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)  # type: ignore

        return deserialized  # type: ignore
+
    @distributed_trace
    def begin_delete(
        self,
        resource_group_name: str,
        resource_name: str,
        agent_pool_name: str,
        ignore_pod_disruption_budget: Optional[bool] = None,
        if_match: Optional[str] = None,
        **kwargs: Any
    ) -> LROPoller[None]:
        """Deletes an agent pool in the specified managed cluster.

        Deletes an agent pool in the specified managed cluster.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param agent_pool_name: The name of the agent pool. Required.
        :type agent_pool_name: str
        :param ignore_pod_disruption_budget: ignore-pod-disruption-budget=true to delete those pods on
         a node without considering Pod Disruption Budget. Default value is None.
        :type ignore_pod_disruption_budget: bool
        :param if_match: The request should only proceed if an entity matches this string. Default
         value is None.
        :type if_match: str
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # Only issue the initial DELETE when not resuming from a continuation token.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                agent_pool_name=agent_pool_name,
                ignore_pod_disruption_budget=ignore_pod_disruption_budget,
                if_match=if_match,
                api_version=api_version,
                cls=lambda x, y, z: x,  # keep the raw PipelineResponse for the poller
                headers=_headers,
                params=_params,
                **kwargs
            )
            # Drain the streamed body so the connection is released before polling starts.
            raw_result.http_response.read()  # type: ignore
        # error_map was already consumed by the initial call; don't forward it to the poller.
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # Delete produces no body; only invoke the user callback if one was given.
            if cls:
                return cls(pipeline_response, None, {})  # type: ignore

        if polling is True:
            polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller[None].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
+
    @distributed_trace
    def get_upgrade_profile(
        self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
    ) -> _models.AgentPoolUpgradeProfile:
        """Gets the upgrade profile for an agent pool.

        Gets the upgrade profile for an agent pool.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param agent_pool_name: The name of the agent pool. Required.
        :type agent_pool_name: str
        :return: AgentPoolUpgradeProfile or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.AgentPoolUpgradeProfile
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.AgentPoolUpgradeProfile] = kwargs.pop("cls", None)

        _request = build_get_upgrade_profile_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            agent_pool_name=agent_pool_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        # Synchronous GET: no LRO, no streaming — buffer the whole response.
        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("AgentPoolUpgradeProfile", pipeline_response.http_response)

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore
+
    def _delete_machines_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        agent_pool_name: str,
        machines: Union[_models.AgentPoolDeleteMachinesParameter, IO[bytes]],
        **kwargs: Any
    ) -> Iterator[bytes]:
        """Issue the initial POST of the delete-machines LRO and return the raw streamed
        response body; ``begin_delete_machines`` drives the subsequent polling."""
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw byte streams are sent as-is; model instances are serialized to JSON.
        if isinstance(machines, (IOBase, bytes)):
            _content = machines
        else:
            _json = self._serialize.body(machines, "AgentPoolDeleteMachinesParameter")

        _request = build_delete_machines_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            agent_pool_name=agent_pool_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _decompress = kwargs.pop("decompress", True)
        _stream = True
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [202]:
            try:
                response.read()  # Load the body in memory and close the socket
            except (StreamConsumedError, StreamClosedError):
                pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        response_headers = {}
        # 202 is the only success code here, so Location is captured unconditionally.
        response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))

        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)  # type: ignore

        return deserialized  # type: ignore
+
    @overload
    def begin_delete_machines(
        self,
        resource_group_name: str,
        resource_name: str,
        agent_pool_name: str,
        machines: _models.AgentPoolDeleteMachinesParameter,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[None]:
        """Deletes specific machines in an agent pool.

        Deletes specific machines in an agent pool.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param agent_pool_name: The name of the agent pool. Required.
        :type agent_pool_name: str
        :param machines: A list of machines from the agent pool to be deleted. Required.
        :type machines: ~azure.mgmt.containerservice.models.AgentPoolDeleteMachinesParameter
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def begin_delete_machines(
        self,
        resource_group_name: str,
        resource_name: str,
        agent_pool_name: str,
        machines: IO[bytes],
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[None]:
        """Deletes specific machines in an agent pool.

        Deletes specific machines in an agent pool.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param agent_pool_name: The name of the agent pool. Required.
        :type agent_pool_name: str
        :param machines: A list of machines from the agent pool to be deleted. Required.
        :type machines: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def begin_delete_machines(
        self,
        resource_group_name: str,
        resource_name: str,
        agent_pool_name: str,
        machines: Union[_models.AgentPoolDeleteMachinesParameter, IO[bytes]],
        **kwargs: Any
    ) -> LROPoller[None]:
        """Deletes specific machines in an agent pool.

        Deletes specific machines in an agent pool.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param agent_pool_name: The name of the agent pool. Required.
        :type agent_pool_name: str
        :param machines: A list of machines from the agent pool to be deleted. Is either a
         AgentPoolDeleteMachinesParameter type or a IO[bytes] type. Required.
        :type machines: ~azure.mgmt.containerservice.models.AgentPoolDeleteMachinesParameter or
         IO[bytes]
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # Only issue the initial POST when not resuming from a continuation token.
        if cont_token is None:
            raw_result = self._delete_machines_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                agent_pool_name=agent_pool_name,
                machines=machines,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,  # keep the raw PipelineResponse for the poller
                headers=_headers,
                params=_params,
                **kwargs
            )
            # Drain the streamed body so the connection is released before polling starts.
            raw_result.http_response.read()  # type: ignore
        # error_map was already consumed by the initial call; don't forward it to the poller.
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # The operation produces no body; only invoke the user callback if one was given.
            if cls:
                return cls(pipeline_response, None, {})  # type: ignore

        if polling is True:
            polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller[None].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
+
    @distributed_trace
    def get_available_agent_pool_versions(
        self, resource_group_name: str, resource_name: str, **kwargs: Any
    ) -> _models.AgentPoolAvailableVersions:
        """Gets a list of supported Kubernetes versions for the specified agent pool.

        See `supported Kubernetes versions
        <https://docs.microsoft.com/azure/aks/supported-kubernetes-versions>`_ for more details about
        the version lifecycle.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :return: AgentPoolAvailableVersions or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.AgentPoolAvailableVersions
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.AgentPoolAvailableVersions] = kwargs.pop("cls", None)

        _request = build_get_available_agent_pool_versions_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        # Synchronous GET: no LRO, no streaming — buffer the whole response.
        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("AgentPoolAvailableVersions", pipeline_response.http_response)

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore
+
    def _upgrade_node_image_version_initial(
        self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
    ) -> Iterator[bytes]:
        """Issue the initial POST of the node-image-upgrade LRO and return the raw streamed
        response body; ``begin_upgrade_node_image_version`` drives the subsequent polling."""
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)

        _request = build_upgrade_node_image_version_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            agent_pool_name=agent_pool_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _decompress = kwargs.pop("decompress", True)
        _stream = True
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            try:
                response.read()  # Load the body in memory and close the socket
            except (StreamConsumedError, StreamClosedError):
                pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        response_headers = {}
        # 202 Accepted carries the async-operation polling URL; 200 means already complete.
        if response.status_code == 202:
            response_headers["Azure-AsyncOperation"] = self._deserialize(
                "str", response.headers.get("Azure-AsyncOperation")
            )

        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)  # type: ignore

        return deserialized  # type: ignore
+
    @distributed_trace
    def begin_upgrade_node_image_version(
        self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
    ) -> LROPoller[_models.AgentPool]:
        """Upgrades the node image version of an agent pool to the latest.

        Upgrading the node image version of an agent pool applies the newest OS and runtime updates to
        the nodes. AKS provides one new image per week with the latest updates. For more details on
        node image versions, see: https://docs.microsoft.com/azure/aks/node-image-upgrade.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param agent_pool_name: The name of the agent pool. Required.
        :type agent_pool_name: str
        :return: An instance of LROPoller that returns either AgentPool or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.AgentPool]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        # Annotation corrected from ClsType[None]: the callback below passes a
        # deserialized AgentPool to cls, matching the LROPoller[AgentPool] return type.
        cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # Only issue the initial POST when not resuming from a continuation token.
        if cont_token is None:
            raw_result = self._upgrade_node_image_version_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                agent_pool_name=agent_pool_name,
                api_version=api_version,
                cls=lambda x, y, z: x,  # keep the raw PipelineResponse for the poller
                headers=_headers,
                params=_params,
                **kwargs
            )
            # Drain the streamed body so the connection is released before polling starts.
            raw_result.http_response.read()  # type: ignore
        # error_map was already consumed by the initial call; don't forward it to the poller.
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            response_headers = {}
            response = pipeline_response.http_response
            response_headers["Azure-AsyncOperation"] = self._deserialize(
                "str", response.headers.get("Azure-AsyncOperation")
            )

            deserialized = self._deserialize("AgentPool", pipeline_response.http_response)
            if cls:
                return cls(pipeline_response, deserialized, response_headers)  # type: ignore
            return deserialized

        if polling is True:
            # NOTE(review): generated code polls with final-state-via "location" while the
            # initial response surfaces Azure-AsyncOperation — presumably intentional per the
            # service's LRO contract; confirm against the REST API spec before changing.
            polling_method: PollingMethod = cast(
                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
            )
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller[_models.AgentPool].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller[_models.AgentPool](
            self._client, raw_result, get_long_running_output, polling_method  # type: ignore
        )
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_machines_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_machines_operations.py
new file mode 100644
index 00000000000..48bbc9dc786
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_machines_operations.py
@@ -0,0 +1,313 @@
+# pylint: disable=line-too-long,useless-suppression
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from typing import Any, Callable, Optional, TypeVar
+import urllib.parse
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from .. import models as _models
+from .._configuration import ContainerServiceClientConfiguration
+from .._utils.serialization import Deserializer, Serializer
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(
+ resource_group_name: str, resource_name: str, agent_pool_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/machines",
+ )
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "resourceName": _SERIALIZER.url(
+ "resource_name",
+ resource_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+ ),
+ "agentPoolName": _SERIALIZER.url(
+ "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(
+ resource_group_name: str,
+ resource_name: str,
+ agent_pool_name: str,
+ machine_name: str,
+ subscription_id: str,
+ **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/machines/{machineName}",
+ )
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "resourceName": _SERIALIZER.url(
+ "resource_name",
+ resource_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+ ),
+ "agentPoolName": _SERIALIZER.url(
+ "agent_pool_name", agent_pool_name, "str", max_length=12, min_length=1, pattern=r"^[a-z][a-z0-9]{0,11}$"
+ ),
+ "machineName": _SERIALIZER.url(
+ "machine_name", machine_name, "str", pattern=r"^[a-zA-Z0-9][-_a-zA-Z0-9]{0,39}$"
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class MachinesOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s
+ :attr:`machines` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
+ ) -> ItemPaged["_models.Machine"]:
+ """Gets a list of machines in the specified agent pool.
+
+ Gets a list of machines in the specified agent pool.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param agent_pool_name: The name of the agent pool. Required.
+ :type agent_pool_name: str
+ :return: An iterator like instance of either Machine or the result of cls(response)
+ :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.Machine]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.MachineListResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ agent_pool_name=agent_pool_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("MachineListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ @distributed_trace
+ def get(
+ self, resource_group_name: str, resource_name: str, agent_pool_name: str, machine_name: str, **kwargs: Any
+ ) -> _models.Machine:
+ """Get a specific machine in the specified agent pool.
+
+ Get a specific machine in the specified agent pool.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param agent_pool_name: The name of the agent pool. Required.
+ :type agent_pool_name: str
+ :param machine_name: host name of the machine. Required.
+ :type machine_name: str
+ :return: Machine or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.Machine
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.Machine] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ agent_pool_name=agent_pool_name,
+ machine_name=machine_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("Machine", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_maintenance_configurations_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_maintenance_configurations_operations.py
new file mode 100644
index 00000000000..5a1763a2421
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_maintenance_configurations_operations.py
@@ -0,0 +1,599 @@
+# pylint: disable=line-too-long,useless-suppression
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from io import IOBase
+from typing import Any, Callable, IO, Optional, TypeVar, Union, overload
+import urllib.parse
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from .. import models as _models
+from .._configuration import ContainerServiceClientConfiguration
+from .._utils.serialization import Deserializer, Serializer
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_by_managed_cluster_request(
+ resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations",
+ )
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "resourceName": _SERIALIZER.url(
+ "resource_name",
+ resource_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(
+ resource_group_name: str, resource_name: str, config_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}",
+ )
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "resourceName": _SERIALIZER.url(
+ "resource_name",
+ resource_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+ ),
+ "configName": _SERIALIZER.url("config_name", config_name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+ resource_group_name: str, resource_name: str, config_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}",
+ )
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "resourceName": _SERIALIZER.url(
+ "resource_name",
+ resource_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+ ),
+ "configName": _SERIALIZER.url("config_name", config_name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+ resource_group_name: str, resource_name: str, config_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}",
+ )
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "resourceName": _SERIALIZER.url(
+ "resource_name",
+ resource_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+ ),
+ "configName": _SERIALIZER.url("config_name", config_name, "str"),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class MaintenanceConfigurationsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s
+ :attr:`maintenance_configurations` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ input_args = list(args)
+ self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list_by_managed_cluster(
+ self, resource_group_name: str, resource_name: str, **kwargs: Any
+ ) -> ItemPaged["_models.MaintenanceConfiguration"]:
+ """Gets a list of maintenance configurations in the specified managed cluster.
+
+ Gets a list of maintenance configurations in the specified managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :return: An iterator like instance of either MaintenanceConfiguration or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.MaintenanceConfiguration]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.MaintenanceConfigurationListResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_by_managed_cluster_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("MaintenanceConfigurationListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ @distributed_trace
+ def get(
+ self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any
+ ) -> _models.MaintenanceConfiguration:
+ """Gets the specified maintenance configuration of a managed cluster.
+
+ Gets the specified maintenance configuration of a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param config_name: The name of the maintenance configuration. Supported values are 'default',
+ 'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required.
+ :type config_name: str
+ :return: MaintenanceConfiguration or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.MaintenanceConfiguration
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.MaintenanceConfiguration] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ config_name=config_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def create_or_update(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ config_name: str,
+ parameters: _models.MaintenanceConfiguration,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.MaintenanceConfiguration:
+ """Creates or updates a maintenance configuration in the specified managed cluster.
+
+ Creates or updates a maintenance configuration in the specified managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param config_name: The name of the maintenance configuration. Supported values are 'default',
+ 'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required.
+ :type config_name: str
+ :param parameters: The maintenance configuration to create or update. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.MaintenanceConfiguration
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: MaintenanceConfiguration or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.MaintenanceConfiguration
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def create_or_update(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ config_name: str,
+ parameters: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.MaintenanceConfiguration:
+ """Creates or updates a maintenance configuration in the specified managed cluster.
+
+ Creates or updates a maintenance configuration in the specified managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param config_name: The name of the maintenance configuration. Supported values are 'default',
+ 'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required.
+ :type config_name: str
+ :param parameters: The maintenance configuration to create or update. Required.
+ :type parameters: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: MaintenanceConfiguration or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.MaintenanceConfiguration
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def create_or_update(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ config_name: str,
+ parameters: Union[_models.MaintenanceConfiguration, IO[bytes]],
+ **kwargs: Any
+ ) -> _models.MaintenanceConfiguration:
+ """Creates or updates a maintenance configuration in the specified managed cluster.
+
+ Creates or updates a maintenance configuration in the specified managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param config_name: The name of the maintenance configuration. Supported values are 'default',
+ 'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required.
+ :type config_name: str
+ :param parameters: The maintenance configuration to create or update. Is either a
+ MaintenanceConfiguration type or a IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.MaintenanceConfiguration or IO[bytes]
+ :return: MaintenanceConfiguration or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.MaintenanceConfiguration
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.MaintenanceConfiguration] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ if isinstance(parameters, (IOBase, bytes)):
+ _content = parameters
+ else:
+ _json = self._serialize.body(parameters, "MaintenanceConfiguration")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ config_name=config_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace
+ def delete( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any
+ ) -> None:
+ """Deletes a maintenance configuration.
+
+ Deletes a maintenance configuration.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param config_name: The name of the maintenance configuration. Supported values are 'default',
+ 'aksManagedAutoUpgradeSchedule', or 'aksManagedNodeOSUpgradeSchedule'. Required.
+ :type config_name: str
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ config_name=config_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_managed_clusters_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_managed_clusters_operations.py
new file mode 100644
index 00000000000..e32ef0b8666
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_managed_clusters_operations.py
@@ -0,0 +1,3985 @@
+# pylint: disable=line-too-long,useless-suppression,too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from io import IOBase
+from typing import Any, Callable, IO, Iterator, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._configuration import ContainerServiceClientConfiguration
+from .._utils.serialization import Deserializer, Serializer
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_kubernetes_versions_request(location: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
+    """Build a GET request listing supported Kubernetes versions in a location.
+
+    Autogenerated by AutoRest; do not hand-edit (regeneration overwrites changes).
+    """
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    # Caller-supplied api_version (kwarg or query param) wins over the generated default.
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/kubernetesVersions",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "location": _SERIALIZER.url("location", location, "str", min_length=1),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
+    """Build a GET request listing all managed clusters in the subscription."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters"
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_by_resource_group_request(resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
+    """Build a GET request listing managed clusters in one resource group."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_upgrade_profile_request(
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build a GET request for a cluster's default upgrade profile."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        # Pattern enforces AKS cluster naming rules client-side before the request is sent.
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_access_profile_request(
+    resource_group_name: str, resource_name: str, role_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build a POST request for the cluster access profile credential of a given role."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+        "roleName": _SERIALIZER.url("role_name", role_name, "str"),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_cluster_admin_credentials_request(  # pylint: disable=name-too-long
+    resource_group_name: str,
+    resource_name: str,
+    subscription_id: str,
+    *,
+    server_fqdn: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    """Build a POST request that lists the cluster admin kubeconfig credential.
+
+    :keyword server_fqdn: optional server FQDN type to return in the kubeconfig.
+    """
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+    if server_fqdn is not None:
+        _params["server-fqdn"] = _SERIALIZER.query("server_fqdn", server_fqdn, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_cluster_user_credentials_request(  # pylint: disable=name-too-long
+    resource_group_name: str,
+    resource_name: str,
+    subscription_id: str,
+    *,
+    server_fqdn: Optional[str] = None,
+    format: Optional[Union[str, _models.Format]] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    """Build a POST request that lists the cluster user kubeconfig credential.
+
+    NOTE: the keyword ``format`` shadows the builtin; the name is fixed by the
+    generated API surface and the REST query parameter, so it cannot be renamed.
+    """
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+    if server_fqdn is not None:
+        _params["server-fqdn"] = _SERIALIZER.query("server_fqdn", server_fqdn, "str")
+    if format is not None:
+        _params["format"] = _SERIALIZER.query("format", format, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_cluster_monitoring_user_credentials_request(  # pylint: disable=name-too-long
+    resource_group_name: str,
+    resource_name: str,
+    subscription_id: str,
+    *,
+    server_fqdn: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    """Build a POST request that lists the cluster monitoring-user kubeconfig credential."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterMonitoringUserCredential",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+    if server_fqdn is not None:
+        _params["server-fqdn"] = _SERIALIZER.query("server_fqdn", server_fqdn, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
+    """Build a GET request retrieving a single managed cluster resource."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+    resource_group_name: str,
+    resource_name: str,
+    subscription_id: str,
+    *,
+    if_match: Optional[str] = None,
+    if_none_match: Optional[str] = None,
+    **kwargs: Any
+) -> HttpRequest:
+    """Build a PUT request that creates or updates a managed cluster.
+
+    :keyword if_match: ETag for optimistic concurrency (sent as ``If-Match``).
+    :keyword if_none_match: ETag precondition (sent as ``If-None-Match``).
+    """
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    # Content-Type is set by the caller that serializes the request body.
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if if_none_match is not None:
+        _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_update_tags_request(
+    resource_group_name: str, resource_name: str, subscription_id: str, *, if_match: Optional[str] = None, **kwargs: Any
+) -> HttpRequest:
+    """Build a PATCH request that updates tags on a managed cluster."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+    resource_group_name: str, resource_name: str, subscription_id: str, *, if_match: Optional[str] = None, **kwargs: Any
+) -> HttpRequest:
+    """Build a DELETE request that removes a managed cluster."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    if if_match is not None:
+        _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_reset_service_principal_profile_request(  # pylint: disable=name-too-long
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build a POST request that resets the cluster's service principal profile."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_reset_aad_profile_request(
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build a POST request that resets the cluster's AAD profile."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_rotate_cluster_certificates_request(  # pylint: disable=name-too-long
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build a POST request that rotates the cluster's certificates."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_abort_latest_operation_request(
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build a POST request that aborts the cluster's currently running operation."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    # NOTE(review): lowercase "managedclusters" differs from the other builders'
+    # "managedClusters" — presumably faithful to the REST spec for this operation
+    # (ARM paths are case-insensitive); do not "fix" the literal by hand.
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclusters/{resourceName}/abort",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_rotate_service_account_signing_keys_request(  # pylint: disable=name-too-long
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build a POST request that rotates the cluster's service account signing keys."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateServiceAccountSigningKeys",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_stop_request(
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build a POST request that stops a running managed cluster."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/stop",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_start_request(
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build a POST request that starts a stopped managed cluster."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/start",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_run_command_request(
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build a POST request that submits a command to run inside the cluster."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/runCommand",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_command_result_request(
+    resource_group_name: str, resource_name: str, command_id: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build a GET request retrieving the result of a previously submitted run-command."""
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/commandResults/{commandId}",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+        "commandId": _SERIALIZER.url("command_id", command_id, "str"),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_outbound_network_dependencies_endpoints_request(  # pylint: disable=name-too-long
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build the HTTP GET request listing a cluster's outbound network dependency endpoints.
+    Auto-generated (autorest) request builder.
+    """
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    # A caller-supplied api-version (kwarg or query param) wins over the default.
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/outboundNetworkDependenciesEndpoints",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_mesh_revision_profiles_request(  # pylint: disable=name-too-long
+    location: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build the HTTP GET request listing service-mesh revision profiles for a location.
+    Location-scoped (no resource group in the path). Auto-generated (autorest) request builder.
+    """
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    # A caller-supplied api-version (kwarg or query param) wins over the default.
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/meshRevisionProfiles",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "location": _SERIALIZER.url("location", location, "str", min_length=1),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_mesh_revision_profile_request(
+    location: str, mode: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build the HTTP GET request fetching one service-mesh revision profile by mode.
+    Auto-generated (autorest) request builder.
+    """
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    # A caller-supplied api-version (kwarg or query param) wins over the default.
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/meshRevisionProfiles/{mode}",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "location": _SERIALIZER.url("location", location, "str", min_length=1),
+        "mode": _SERIALIZER.url(
+            "mode",
+            mode,
+            "str",
+            max_length=24,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_mesh_upgrade_profiles_request(
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build the HTTP GET request listing service-mesh upgrade profiles of a managed cluster.
+    Auto-generated (autorest) request builder.
+    """
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    # A caller-supplied api-version (kwarg or query param) wins over the default.
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/meshUpgradeProfiles",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_mesh_upgrade_profile_request(
+    resource_group_name: str, resource_name: str, mode: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    """Build the HTTP GET request fetching one service-mesh upgrade profile of a cluster by mode.
+    Auto-generated (autorest) request builder.
+    """
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    # A caller-supplied api-version (kwarg or query param) wins over the default.
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/meshUpgradeProfiles/{mode}",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+        "mode": _SERIALIZER.url(
+            "mode",
+            mode,
+            "str",
+            max_length=24,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class ManagedClustersOperations: # pylint: disable=too-many-public-methods
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s
+ :attr:`managed_clusters` attribute.
+ """
+
+ models = _models
+
+    def __init__(self, *args, **kwargs) -> None:
+        # Generated plumbing: the service client injects the pipeline client,
+        # configuration, serializer and deserializer either positionally or by
+        # keyword; positional arguments take precedence.
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def list_kubernetes_versions(self, location: str, **kwargs: Any) -> _models.KubernetesVersionListResult:
+        """Gets a list of supported Kubernetes versions in the specified subscription.
+
+        Contains extra metadata on the version, including supported patch versions, capabilities,
+        available upgrades, and details on preview status of the version.
+
+        :param location: The name of the Azure region. Required.
+        :type location: str
+        :return: KubernetesVersionListResult or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.KubernetesVersionListResult
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        # Map well-known status codes to azure-core exception types; callers may
+        # extend or override the mapping via the ``error_map`` kwarg.
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.KubernetesVersionListResult] = kwargs.pop("cls", None)
+
+        _request = build_list_kubernetes_versions_request(
+            location=location,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            # Best-effort parse of the ARM error body; never raises on bad payloads.
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("KubernetesVersionListResult", pipeline_response.http_response)
+
+        if cls:
+            # ``cls`` is the caller's custom response hook; its return value wins.
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def list(self, **kwargs: Any) -> ItemPaged["_models.ManagedCluster"]:
+        """Gets a list of managed clusters in the specified subscription.
+
+        Gets a list of managed clusters in the specified subscription.
+
+        :return: An iterator like instance of either ManagedCluster or the result of cls(response)
+        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.ManagedCluster]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.ManagedClusterListResult] = kwargs.pop("cls", None)
+
+        # Map well-known status codes to azure-core exception types; callers may
+        # extend or override the mapping via the ``error_map`` kwarg.
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        def prepare_request(next_link=None):
+            # First page: build the canonical list request. Subsequent pages:
+            # follow ``next_link`` verbatim, re-injecting the client api-version.
+            if not next_link:
+
+                _request = build_list_request(
+                    subscription_id=self._config.subscription_id,
+                    api_version=api_version,
+                    headers=_headers,
+                    params=_params,
+                )
+                _request.url = self._client.format_url(_request.url)
+
+            else:
+                # make call to next link with the client's api-version
+                _parsed_next_link = urllib.parse.urlparse(next_link)
+                _next_request_params = case_insensitive_dict(
+                    {
+                        key: [urllib.parse.quote(v) for v in value]
+                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+                    }
+                )
+                _next_request_params["api-version"] = self._config.api_version
+                _request = HttpRequest(
+                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+                )
+                _request.url = self._client.format_url(_request.url)
+            # NOTE: redundant — the request is already constructed as GET above
+            # (generated code artifact).
+            _request.method = "GET"
+            return _request
+
+        def extract_data(pipeline_response):
+            # Returns (continuation token, iterator of page items) as ItemPaged expects.
+            deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)  # type: ignore
+            return deserialized.next_link or None, iter(list_of_elem)
+
+        def get_next(next_link=None):
+            _request = prepare_request(next_link)
+
+            _stream = False
+            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+                _request, stream=_stream, **kwargs
+            )
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                error = self._deserialize.failsafe_deserialize(
+                    _models.ErrorResponse,
+                    pipeline_response,
+                )
+                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        return ItemPaged(get_next, extract_data)
+
+    @distributed_trace
+    def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> ItemPaged["_models.ManagedCluster"]:
+        """Lists managed clusters in the specified subscription and resource group.
+
+        Lists managed clusters in the specified subscription and resource group.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :return: An iterator like instance of either ManagedCluster or the result of cls(response)
+        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.ManagedCluster]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.ManagedClusterListResult] = kwargs.pop("cls", None)
+
+        # Map well-known status codes to azure-core exception types; callers may
+        # extend or override the mapping via the ``error_map`` kwarg.
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        def prepare_request(next_link=None):
+            # First page: build the canonical list request. Subsequent pages:
+            # follow ``next_link`` verbatim, re-injecting the client api-version.
+            if not next_link:
+
+                _request = build_list_by_resource_group_request(
+                    resource_group_name=resource_group_name,
+                    subscription_id=self._config.subscription_id,
+                    api_version=api_version,
+                    headers=_headers,
+                    params=_params,
+                )
+                _request.url = self._client.format_url(_request.url)
+
+            else:
+                # make call to next link with the client's api-version
+                _parsed_next_link = urllib.parse.urlparse(next_link)
+                _next_request_params = case_insensitive_dict(
+                    {
+                        key: [urllib.parse.quote(v) for v in value]
+                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+                    }
+                )
+                _next_request_params["api-version"] = self._config.api_version
+                _request = HttpRequest(
+                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+                )
+                _request.url = self._client.format_url(_request.url)
+            # NOTE: redundant — the request is already constructed as GET above
+            # (generated code artifact).
+            _request.method = "GET"
+            return _request
+
+        def extract_data(pipeline_response):
+            # Returns (continuation token, iterator of page items) as ItemPaged expects.
+            deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)  # type: ignore
+            return deserialized.next_link or None, iter(list_of_elem)
+
+        def get_next(next_link=None):
+            _request = prepare_request(next_link)
+
+            _stream = False
+            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+                _request, stream=_stream, **kwargs
+            )
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                error = self._deserialize.failsafe_deserialize(
+                    _models.ErrorResponse,
+                    pipeline_response,
+                )
+                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        return ItemPaged(get_next, extract_data)
+
+    @distributed_trace
+    def get_upgrade_profile(
+        self, resource_group_name: str, resource_name: str, **kwargs: Any
+    ) -> _models.ManagedClusterUpgradeProfile:
+        """Gets the upgrade profile of a managed cluster.
+
+        Gets the upgrade profile of a managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :return: ManagedClusterUpgradeProfile or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.ManagedClusterUpgradeProfile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        # Map well-known status codes to azure-core exception types; callers may
+        # extend or override the mapping via the ``error_map`` kwarg.
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.ManagedClusterUpgradeProfile] = kwargs.pop("cls", None)
+
+        _request = build_get_upgrade_profile_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("ManagedClusterUpgradeProfile", pipeline_response.http_response)
+
+        if cls:
+            # ``cls`` is the caller's custom response hook; its return value wins.
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def get_access_profile(
+        self, resource_group_name: str, resource_name: str, role_name: str, **kwargs: Any
+    ) -> _models.ManagedClusterAccessProfile:
+        """Gets an access profile of a managed cluster.
+
+        **WARNING**\\ : This API will be deprecated. Instead use
+        :meth:`list_cluster_user_credentials` or :meth:`list_cluster_admin_credentials`.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param role_name: The name of the role for managed cluster accessProfile resource. Required.
+        :type role_name: str
+        :return: ManagedClusterAccessProfile or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.ManagedClusterAccessProfile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        # Map well-known status codes to azure-core exception types; callers may
+        # extend or override the mapping via the ``error_map`` kwarg.
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.ManagedClusterAccessProfile] = kwargs.pop("cls", None)
+
+        _request = build_get_access_profile_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            role_name=role_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("ManagedClusterAccessProfile", pipeline_response.http_response)
+
+        if cls:
+            # ``cls`` is the caller's custom response hook; its return value wins.
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def list_cluster_admin_credentials(
+        self, resource_group_name: str, resource_name: str, server_fqdn: Optional[str] = None, **kwargs: Any
+    ) -> _models.CredentialResults:
+        """Lists the admin credentials of a managed cluster.
+
+        Lists the admin credentials of a managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param server_fqdn: server fqdn type for credentials to be returned. Default value is None.
+        :type server_fqdn: str
+        :return: CredentialResults or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.CredentialResults
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        # Map well-known status codes to azure-core exception types; callers may
+        # extend or override the mapping via the ``error_map`` kwarg.
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None)
+
+        _request = build_list_cluster_admin_credentials_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            server_fqdn=server_fqdn,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("CredentialResults", pipeline_response.http_response)
+
+        if cls:
+            # ``cls`` is the caller's custom response hook; its return value wins.
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def list_cluster_user_credentials(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        server_fqdn: Optional[str] = None,
+        format: Optional[Union[str, _models.Format]] = None,
+        **kwargs: Any
+    ) -> _models.CredentialResults:
+        """Lists the user credentials of a managed cluster.
+
+        Lists the user credentials of a managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param server_fqdn: server fqdn type for credentials to be returned. Default value is None.
+        :type server_fqdn: str
+        :param format: Only apply to AAD clusters, specifies the format of returned kubeconfig. Format
+         'azure' will return azure auth-provider kubeconfig; format 'exec' will return exec format
+         kubeconfig, which requires kubelogin binary in the path. Known values are: "azure" and
+         "exec". Default value is None.
+        :type format: str or ~azure.mgmt.containerservice.models.Format
+        :return: CredentialResults or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.CredentialResults
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        # Map well-known status codes to azure-core exception types; callers may
+        # extend or override the mapping via the ``error_map`` kwarg.
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None)
+
+        _request = build_list_cluster_user_credentials_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            server_fqdn=server_fqdn,
+            format=format,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("CredentialResults", pipeline_response.http_response)
+
+        if cls:
+            # ``cls`` is the caller's custom response hook; its return value wins.
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def list_cluster_monitoring_user_credentials(
+        self, resource_group_name: str, resource_name: str, server_fqdn: Optional[str] = None, **kwargs: Any
+    ) -> _models.CredentialResults:
+        """Lists the cluster monitoring user credentials of a managed cluster.
+
+        Lists the cluster monitoring user credentials of a managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param server_fqdn: server fqdn type for credentials to be returned. Default value is None.
+        :type server_fqdn: str
+        :return: CredentialResults or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.CredentialResults
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        # Map well-known status codes to azure-core exception types; callers may
+        # extend or override the mapping via the ``error_map`` kwarg.
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None)
+
+        _request = build_list_cluster_monitoring_user_credentials_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            server_fqdn=server_fqdn,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("CredentialResults", pipeline_response.http_response)
+
+        if cls:
+            # ``cls`` is the caller's custom response hook; its return value wins.
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _models.ManagedCluster:
+        """Gets a managed cluster.
+
+        Gets a managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :return: ManagedCluster or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.ManagedCluster
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        # Map well-known status codes to azure-core exception types; callers may
+        # extend or override the mapping via the ``error_map`` kwarg.
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None)
+
+        _request = build_get_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("ManagedCluster", pipeline_response.http_response)
+
+        if cls:
+            # ``cls`` is the caller's custom response hook; its return value wins.
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+ def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.ManagedCluster, IO[bytes]],
+ if_match: Optional[str] = None,
+ if_none_match: Optional[str] = None,
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ """Send the initial PUT of the create-or-update long-running operation and
+ return the raw streamed response body. Polling is driven by the public
+ ``begin_create_or_update``; this helper only issues the first request.
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ # Caller-supplied mappings take precedence over the defaults above.
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ # Raw binary payloads are forwarded as-is; model objects are serialized to JSON.
+ if isinstance(parameters, (IOBase, bytes)):
+ _content = parameters
+ else:
+ _json = self._serialize.body(parameters, "ManagedCluster")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ if_match=if_match,
+ if_none_match=if_none_match,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ # Stream the response: the body is only consumed later by the poller.
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ # Typing-only overload (ManagedCluster model body); the shared implementation
+ # is the @distributed_trace begin_create_or_update defined after the overloads.
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: _models.ManagedCluster,
+ if_match: Optional[str] = None,
+ if_none_match: Optional[str] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ManagedCluster]:
+ """Creates or updates a managed cluster.
+
+ Creates or updates a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The managed cluster to create or update. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.ManagedCluster
+ :param if_match: The request should only proceed if an entity matches this string. Default
+ value is None.
+ :type if_match: str
+ :param if_none_match: The request should only proceed if no entity matches this string. Default
+ value is None.
+ :type if_none_match: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either ManagedCluster or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedCluster]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ # Typing-only overload (raw IO[bytes] body); the shared implementation is the
+ # @distributed_trace begin_create_or_update defined after the overloads.
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: IO[bytes],
+ if_match: Optional[str] = None,
+ if_none_match: Optional[str] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ManagedCluster]:
+ """Creates or updates a managed cluster.
+
+ Creates or updates a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The managed cluster to create or update. Required.
+ :type parameters: IO[bytes]
+ :param if_match: The request should only proceed if an entity matches this string. Default
+ value is None.
+ :type if_match: str
+ :param if_none_match: The request should only proceed if no entity matches this string. Default
+ value is None.
+ :type if_none_match: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either ManagedCluster or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedCluster]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.ManagedCluster, IO[bytes]],
+ if_match: Optional[str] = None,
+ if_none_match: Optional[str] = None,
+ **kwargs: Any
+ ) -> LROPoller[_models.ManagedCluster]:
+ """Creates or updates a managed cluster.
+
+ Creates or updates a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The managed cluster to create or update. Is either a ManagedCluster type or
+ a IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.ManagedCluster or IO[bytes]
+ :param if_match: The request should only proceed if an entity matches this string. Default
+ value is None.
+ :type if_match: str
+ :param if_none_match: The request should only proceed if no entity matches this string. Default
+ value is None.
+ :type if_none_match: str
+ :return: An instance of LROPoller that returns either ManagedCluster or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedCluster]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ # Only issue the initial request when not resuming from a continuation token.
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ parameters=parameters,
+ if_match=if_match,
+ if_none_match=if_none_match,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ # Drain the streamed body so the poller can safely reuse the response.
+ raw_result.http_response.read() # type: ignore
+ # error_map was consumed by the initial call; don't leak it to the poller.
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ # Deserialize the final response of the LRO into a ManagedCluster.
+ deserialized = self._deserialize("ManagedCluster", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ # polling may be True (default ARM polling), False (no polling), or a
+ # caller-provided PollingMethod instance.
+ if polling is True:
+ polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[_models.ManagedCluster].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[_models.ManagedCluster](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
+
+ def _update_tags_initial(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.TagsObject, IO[bytes]],
+ if_match: Optional[str] = None,
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ """Send the initial PATCH of the update-tags long-running operation and
+ return the raw streamed response body. Polling is driven by the public
+ ``begin_update_tags``; this helper only issues the first request.
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ # Caller-supplied mappings take precedence over the defaults above.
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ # Raw binary payloads are forwarded as-is; model objects are serialized to JSON.
+ if isinstance(parameters, (IOBase, bytes)):
+ _content = parameters
+ else:
+ _json = self._serialize.body(parameters, "TagsObject")
+
+ _request = build_update_tags_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ if_match=if_match,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ # Stream the response: the body is only consumed later by the poller.
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ # Typing-only overload (TagsObject model body); the shared implementation is
+ # the @distributed_trace begin_update_tags defined after the overloads.
+ def begin_update_tags(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: _models.TagsObject,
+ if_match: Optional[str] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ManagedCluster]:
+ """Updates tags on a managed cluster.
+
+ Updates tags on a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.TagsObject
+ :param if_match: The request should only proceed if an entity matches this string. Default
+ value is None.
+ :type if_match: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either ManagedCluster or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedCluster]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ # Typing-only overload (raw IO[bytes] body); the shared implementation is the
+ # @distributed_trace begin_update_tags defined after the overloads.
+ def begin_update_tags(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: IO[bytes],
+ if_match: Optional[str] = None,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.ManagedCluster]:
+ """Updates tags on a managed cluster.
+
+ Updates tags on a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Required.
+ :type parameters: IO[bytes]
+ :param if_match: The request should only proceed if an entity matches this string. Default
+ value is None.
+ :type if_match: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either ManagedCluster or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedCluster]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_update_tags(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.TagsObject, IO[bytes]],
+ if_match: Optional[str] = None,
+ **kwargs: Any
+ ) -> LROPoller[_models.ManagedCluster]:
+ """Updates tags on a managed cluster.
+
+ Updates tags on a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: Parameters supplied to the Update Managed Cluster Tags operation. Is either
+ a TagsObject type or a IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.TagsObject or IO[bytes]
+ :param if_match: The request should only proceed if an entity matches this string. Default
+ value is None.
+ :type if_match: str
+ :return: An instance of LROPoller that returns either ManagedCluster or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedCluster]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ManagedCluster] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ # Only issue the initial request when not resuming from a continuation token.
+ if cont_token is None:
+ raw_result = self._update_tags_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ parameters=parameters,
+ if_match=if_match,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ # Drain the streamed body so the poller can safely reuse the response.
+ raw_result.http_response.read() # type: ignore
+ # error_map was consumed by the initial call; don't leak it to the poller.
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response):
+ # Deserialize the final response of the LRO into a ManagedCluster.
+ deserialized = self._deserialize("ManagedCluster", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ # polling may be True (default ARM polling), False (no polling), or a
+ # caller-provided PollingMethod instance.
+ if polling is True:
+ polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[_models.ManagedCluster].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[_models.ManagedCluster](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
+
+ def _delete_initial(
+ self, resource_group_name: str, resource_name: str, if_match: Optional[str] = None, **kwargs: Any
+ ) -> Iterator[bytes]:
+ """Send the initial DELETE of the delete long-running operation and return
+ the raw streamed response body. Polling is driven by the public
+ ``begin_delete``; this helper only issues the first request.
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ # Caller-supplied mappings take precedence over the defaults above.
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ if_match=if_match,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ # Stream the response: the body is only consumed later by the poller.
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ # On 202 the service returns a Location header the poller follows.
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace
+ def begin_delete(
+ self, resource_group_name: str, resource_name: str, if_match: Optional[str] = None, **kwargs: Any
+ ) -> LROPoller[None]:
+ """Deletes a managed cluster.
+
+ Deletes a managed cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param if_match: The request should only proceed if an entity matches this string. Default
+ value is None.
+ :type if_match: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ # Only issue the initial request when not resuming from a continuation token.
+ if cont_token is None:
+ raw_result = self._delete_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ if_match=if_match,
+ api_version=api_version,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ # Drain the streamed body so the poller can safely reuse the response.
+ raw_result.http_response.read() # type: ignore
+ # error_map was consumed by the initial call; don't leak it to the poller.
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ # Delete has no body to deserialize; only the optional cls hook runs.
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ # polling may be True (default ARM polling), False (no polling), or a
+ # caller-provided PollingMethod instance.
+ if polling is True:
+ polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ def _reset_service_principal_profile_initial(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.ManagedClusterServicePrincipalProfile, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ """Send the initial request of the reset-service-principal-profile
+ long-running operation and return the raw streamed response body. Polling
+ is driven by the public ``begin_reset_service_principal_profile``.
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ # Caller-supplied mappings take precedence over the defaults above.
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ # Raw binary payloads are forwarded as-is; model objects are serialized to JSON.
+ if isinstance(parameters, (IOBase, bytes)):
+ _content = parameters
+ else:
+ _json = self._serialize.body(parameters, "ManagedClusterServicePrincipalProfile")
+
+ _request = build_reset_service_principal_profile_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ # Stream the response: the body is only consumed later by the poller.
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ # On 202 the service returns a Location header the poller follows.
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ # Typing-only overload (ManagedClusterServicePrincipalProfile model body); the
+ # shared implementation is the @distributed_trace method after the overloads.
+ def begin_reset_service_principal_profile(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: _models.ManagedClusterServicePrincipalProfile,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Reset the Service Principal Profile of a managed cluster.
+
+ This action cannot be performed on a cluster that is not using a service principal.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The service principal profile to set on the managed cluster. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.ManagedClusterServicePrincipalProfile
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ # Typing-only overload (raw IO[bytes] body); the shared implementation is the
+ # @distributed_trace method after the overloads.
+ def begin_reset_service_principal_profile(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Reset the Service Principal Profile of a managed cluster.
+
+ This action cannot be performed on a cluster that is not using a service principal.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The service principal profile to set on the managed cluster. Required.
+ :type parameters: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_reset_service_principal_profile(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.ManagedClusterServicePrincipalProfile, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Reset the Service Principal Profile of a managed cluster.
+
+ This action cannot be performed on a cluster that is not using a service principal.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The service principal profile to set on the managed cluster. Is either a
+ ManagedClusterServicePrincipalProfile type or a IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.ManagedClusterServicePrincipalProfile or
+ IO[bytes]
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ # Only issue the initial request when not resuming from a continuation token.
+ if cont_token is None:
+ raw_result = self._reset_service_principal_profile_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ parameters=parameters,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ # Drain the streamed body so the poller can safely reuse the response.
+ raw_result.http_response.read() # type: ignore
+ # error_map was consumed by the initial call; don't leak it to the poller.
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ # This LRO has no body to deserialize; only the optional cls hook runs.
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ # This operation resolves its final state via the Location header.
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+ def _reset_aad_profile_initial(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.ManagedClusterAADProfile, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ """Send the initial request of the reset-AAD-profile long-running operation
+ and return the raw streamed response body. Polling is driven by the public
+ ``begin_reset_aad_profile``; this helper only issues the first request.
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ # Caller-supplied mappings take precedence over the defaults above.
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ # Raw binary payloads are forwarded as-is; model objects are serialized to JSON.
+ if isinstance(parameters, (IOBase, bytes)):
+ _content = parameters
+ else:
+ _json = self._serialize.body(parameters, "ManagedClusterAADProfile")
+
+ _request = build_reset_aad_profile_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ # Stream the response: the body is only consumed later by the poller.
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ # On 202 the service returns a Location header the poller follows.
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ # Typing-only overload (ManagedClusterAADProfile model body); the shared
+ # implementation is the @distributed_trace method after the overloads.
+ def begin_reset_aad_profile(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: _models.ManagedClusterAADProfile,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Reset the AAD Profile of a managed cluster.
+
+ **WARNING**\\ : This API will be deprecated. Please see `AKS-managed Azure Active Directory
+ integration `_ to update your cluster with AKS-managed Azure
+ AD.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The AAD profile to set on the Managed Cluster. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.ManagedClusterAADProfile
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_reset_aad_profile(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Reset the AAD Profile of a managed cluster.
+
+ **WARNING**\\ : This API will be deprecated. Please see `AKS-managed Azure Active Directory
+ integration `_ to update your cluster with AKS-managed Azure
+ AD.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The AAD profile to set on the Managed Cluster. Required.
+ :type parameters: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_reset_aad_profile(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.ManagedClusterAADProfile, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Reset the AAD Profile of a managed cluster.
+
+ **WARNING**\\ : This API will be deprecated. Please see `AKS-managed Azure Active Directory
+ integration `_ to update your cluster with AKS-managed Azure
+ AD.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: The AAD profile to set on the Managed Cluster. Is either a
+ ManagedClusterAADProfile type or a IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.ManagedClusterAADProfile or IO[bytes]
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ if cont_token is None:
+ raw_result = self._reset_aad_profile_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ parameters=parameters,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
+
+ if polling is True:
+ polling_method: PollingMethod = cast(
+ PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+ )
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[None].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method) # type: ignore
+
+    def _rotate_cluster_certificates_initial(
+        self, resource_group_name: str, resource_name: str, **kwargs: Any
+    ) -> Iterator[bytes]:
+        # Internal helper: sends the initial rotate-cluster-certificates request
+        # for the LRO and returns the streamed raw response body. Polling and
+        # finalization are handled by begin_rotate_cluster_certificates.
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+        _request = build_rotate_cluster_certificates_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202, 204]:
+            try:
+                response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        response_headers = {}
+        if response.status_code == 202:
+            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def begin_rotate_cluster_certificates(
+        self, resource_group_name: str, resource_name: str, **kwargs: Any
+    ) -> LROPoller[None]:
+        """Rotates the certificates of a managed cluster.
+
+        See `Certificate rotation <https://learn.microsoft.com/azure/aks/certificate-rotation>`_ for
+        more details about rotating managed cluster certificates.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :return: An instance of LROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.LROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+        if cont_token is None:
+            # Fire the initial request; cls returns the raw PipelineResponse so
+            # the poller can inspect status/headers itself.
+            raw_result = self._rotate_cluster_certificates_initial(
+                resource_group_name=resource_group_name,
+                resource_name=resource_name,
+                api_version=api_version,
+                cls=lambda x, y, z: x,
+                headers=_headers,
+                params=_params,
+                **kwargs
+            )
+            # Drain the streamed body so the connection is released before polling.
+            raw_result.http_response.read()  # type: ignore
+        kwargs.pop("error_map", None)
+
+        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
+            # Operation has no payload; only invoke the caller-supplied hook.
+            if cls:
+                return cls(pipeline_response, None, {})  # type: ignore
+
+        if polling is True:
+            polling_method: PollingMethod = cast(
+                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+            )
+        elif polling is False:
+            polling_method = cast(PollingMethod, NoPolling())
+        else:
+            polling_method = polling
+        if cont_token:
+            return LROPoller[None].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
+
+    def _abort_latest_operation_initial(
+        self, resource_group_name: str, resource_name: str, **kwargs: Any
+    ) -> Iterator[bytes]:
+        # Internal helper: sends the initial abort-latest-operation request for
+        # the LRO and returns the streamed raw response body. Polling and
+        # finalization are handled by begin_abort_latest_operation.
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+        _request = build_abort_latest_operation_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202, 204]:
+            try:
+                response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        response_headers = {}
+        if response.status_code == 202:
+            # NOTE: this operation surfaces a lowercase "location" key (service
+            # contract), unlike the "Location" key used by sibling operations.
+            response_headers["location"] = self._deserialize("str", response.headers.get("location"))
+            response_headers["Azure-AsyncOperation"] = self._deserialize(
+                "str", response.headers.get("Azure-AsyncOperation")
+            )
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def begin_abort_latest_operation(
+        self, resource_group_name: str, resource_name: str, **kwargs: Any
+    ) -> LROPoller[None]:
+        """Aborts last operation running on managed cluster.
+
+        Aborts the currently running operation on the managed cluster. The Managed Cluster will be
+        moved to a Canceling state and eventually to a Canceled state when cancellation finishes. If
+        the operation completes before cancellation can take place, a 409 error code is returned.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :return: An instance of LROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.LROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+        if cont_token is None:
+            # Fire the initial request; cls returns the raw PipelineResponse so
+            # the poller can inspect status/headers itself.
+            raw_result = self._abort_latest_operation_initial(
+                resource_group_name=resource_group_name,
+                resource_name=resource_name,
+                api_version=api_version,
+                cls=lambda x, y, z: x,
+                headers=_headers,
+                params=_params,
+                **kwargs
+            )
+            # Drain the streamed body so the connection is released before polling.
+            raw_result.http_response.read()  # type: ignore
+        kwargs.pop("error_map", None)
+
+        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
+            # Operation has no payload; only invoke the caller-supplied hook.
+            if cls:
+                return cls(pipeline_response, None, {})  # type: ignore
+
+        if polling is True:
+            polling_method: PollingMethod = cast(
+                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+            )
+        elif polling is False:
+            polling_method = cast(PollingMethod, NoPolling())
+        else:
+            polling_method = polling
+        if cont_token:
+            return LROPoller[None].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
+
+    def _rotate_service_account_signing_keys_initial(  # pylint: disable=name-too-long
+        self, resource_group_name: str, resource_name: str, **kwargs: Any
+    ) -> Iterator[bytes]:
+        # Internal helper: sends the initial rotate-service-account-signing-keys
+        # request for the LRO and returns the streamed raw response body.
+        # Polling is handled by begin_rotate_service_account_signing_keys.
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+        _request = build_rotate_service_account_signing_keys_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202, 204]:
+            try:
+                response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        response_headers = {}
+        if response.status_code == 202:
+            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def begin_rotate_service_account_signing_keys(  # pylint: disable=name-too-long
+        self, resource_group_name: str, resource_name: str, **kwargs: Any
+    ) -> LROPoller[None]:
+        """Rotates the service account signing keys of a managed cluster.
+
+        Rotates the service account signing keys of a managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :return: An instance of LROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.LROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+        if cont_token is None:
+            # Fire the initial request; cls returns the raw PipelineResponse so
+            # the poller can inspect status/headers itself.
+            raw_result = self._rotate_service_account_signing_keys_initial(
+                resource_group_name=resource_group_name,
+                resource_name=resource_name,
+                api_version=api_version,
+                cls=lambda x, y, z: x,
+                headers=_headers,
+                params=_params,
+                **kwargs
+            )
+            # Drain the streamed body so the connection is released before polling.
+            raw_result.http_response.read()  # type: ignore
+        kwargs.pop("error_map", None)
+
+        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
+            # Operation has no payload; only invoke the caller-supplied hook.
+            if cls:
+                return cls(pipeline_response, None, {})  # type: ignore
+
+        if polling is True:
+            polling_method: PollingMethod = cast(
+                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+            )
+        elif polling is False:
+            polling_method = cast(PollingMethod, NoPolling())
+        else:
+            polling_method = polling
+        if cont_token:
+            return LROPoller[None].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
+
+    def _stop_initial(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> Iterator[bytes]:
+        # Internal helper: sends the initial stop-cluster request for the LRO
+        # and returns the streamed raw response body. Polling and finalization
+        # are handled by begin_stop.
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+        _request = build_stop_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202, 204]:
+            try:
+                response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        response_headers = {}
+        if response.status_code == 202:
+            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def begin_stop(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> LROPoller[None]:
+        """Stops a Managed Cluster.
+
+        This can only be performed on Azure Virtual Machine Scale set backed clusters. Stopping a
+        cluster stops the control plane and agent nodes entirely, while maintaining all object and
+        cluster state. A cluster does not accrue charges while it is stopped. See `stopping a cluster
+        <https://learn.microsoft.com/azure/aks/start-stop-cluster>`_ for more details about stopping a
+        cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :return: An instance of LROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.LROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+        if cont_token is None:
+            # Fire the initial request; cls returns the raw PipelineResponse so
+            # the poller can inspect status/headers itself.
+            raw_result = self._stop_initial(
+                resource_group_name=resource_group_name,
+                resource_name=resource_name,
+                api_version=api_version,
+                cls=lambda x, y, z: x,
+                headers=_headers,
+                params=_params,
+                **kwargs
+            )
+            # Drain the streamed body so the connection is released before polling.
+            raw_result.http_response.read()  # type: ignore
+        kwargs.pop("error_map", None)
+
+        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
+            # Operation has no payload; only invoke the caller-supplied hook.
+            if cls:
+                return cls(pipeline_response, None, {})  # type: ignore
+
+        if polling is True:
+            polling_method: PollingMethod = cast(
+                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+            )
+        elif polling is False:
+            polling_method = cast(PollingMethod, NoPolling())
+        else:
+            polling_method = polling
+        if cont_token:
+            return LROPoller[None].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
+
+    def _start_initial(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> Iterator[bytes]:
+        # Internal helper: sends the initial start-cluster request for the LRO
+        # and returns the streamed raw response body. Polling and finalization
+        # are handled by begin_start.
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+        _request = build_start_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202, 204]:
+            try:
+                response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        response_headers = {}
+        if response.status_code == 202:
+            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def begin_start(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> LROPoller[None]:
+        """Starts a previously stopped Managed Cluster.
+
+        See `starting a cluster <https://learn.microsoft.com/azure/aks/start-stop-cluster>`_ for more
+        details about starting a cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :return: An instance of LROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.LROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+        if cont_token is None:
+            # Fire the initial request; cls returns the raw PipelineResponse so
+            # the poller can inspect status/headers itself.
+            raw_result = self._start_initial(
+                resource_group_name=resource_group_name,
+                resource_name=resource_name,
+                api_version=api_version,
+                cls=lambda x, y, z: x,
+                headers=_headers,
+                params=_params,
+                **kwargs
+            )
+            # Drain the streamed body so the connection is released before polling.
+            raw_result.http_response.read()  # type: ignore
+        kwargs.pop("error_map", None)
+
+        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
+            # Operation has no payload; only invoke the caller-supplied hook.
+            if cls:
+                return cls(pipeline_response, None, {})  # type: ignore
+
+        if polling is True:
+            polling_method: PollingMethod = cast(
+                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+            )
+        elif polling is False:
+            polling_method = cast(PollingMethod, NoPolling())
+        else:
+            polling_method = polling
+        if cont_token:
+            return LROPoller[None].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
+
+    def _run_command_initial(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        request_payload: Union[_models.RunCommandRequest, IO[bytes]],
+        **kwargs: Any
+    ) -> Iterator[bytes]:
+        # Internal helper: sends the initial run-command request for the LRO and
+        # returns the streamed raw response body. The payload is serialized as
+        # JSON unless the caller passed a raw binary stream. Polling is handled
+        # by begin_run_command.
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+        content_type = content_type or "application/json"
+        _json = None
+        _content = None
+        # Pass file-like/bytes payloads through untouched; serialize model objects.
+        if isinstance(request_payload, (IOBase, bytes)):
+            _content = request_payload
+        else:
+            _json = self._serialize.body(request_payload, "RunCommandRequest")
+
+        _request = build_run_command_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            content_type=content_type,
+            json=_json,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202]:
+            try:
+                response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        response_headers = {}
+        if response.status_code == 202:
+            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @overload
+    def begin_run_command(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        request_payload: _models.RunCommandRequest,
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> LROPoller[_models.RunCommandResult]:
+        """Submits a command to run against the Managed Cluster.
+
+        AKS will create a pod to run the command. This is primarily useful for private clusters. For
+        more information see `AKS Run Command
+        <https://learn.microsoft.com/azure/aks/private-clusters#aks-run-command-preview>`_.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param request_payload: The run command request. Required.
+        :type request_payload: ~azure.mgmt.containerservice.models.RunCommandRequest
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: An instance of LROPoller that returns either RunCommandResult or the result of
+         cls(response)
+        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.RunCommandResult]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def begin_run_command(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        request_payload: IO[bytes],
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> LROPoller[_models.RunCommandResult]:
+        """Submits a command to run against the Managed Cluster.
+
+        AKS will create a pod to run the command. This is primarily useful for private clusters. For
+        more information see `AKS Run Command
+        <https://learn.microsoft.com/azure/aks/private-clusters#aks-run-command-preview>`_.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param request_payload: The run command request. Required.
+        :type request_payload: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: An instance of LROPoller that returns either RunCommandResult or the result of
+         cls(response)
+        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.RunCommandResult]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def begin_run_command(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        request_payload: Union[_models.RunCommandRequest, IO[bytes]],
+        **kwargs: Any
+    ) -> LROPoller[_models.RunCommandResult]:
+        """Submits a command to run against the Managed Cluster.
+
+        AKS will create a pod to run the command. This is primarily useful for private clusters. For
+        more information see `AKS Run Command
+        <https://learn.microsoft.com/azure/aks/private-clusters#aks-run-command-preview>`_.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param request_payload: The run command request. Is either a RunCommandRequest type or a
+         IO[bytes] type. Required.
+        :type request_payload: ~azure.mgmt.containerservice.models.RunCommandRequest or IO[bytes]
+        :return: An instance of LROPoller that returns either RunCommandResult or the result of
+         cls(response)
+        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.RunCommandResult]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models.RunCommandResult] = kwargs.pop("cls", None)
+        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+        if cont_token is None:
+            # Fire the initial request; cls returns the raw PipelineResponse so
+            # the poller can inspect status/headers itself.
+            raw_result = self._run_command_initial(
+                resource_group_name=resource_group_name,
+                resource_name=resource_name,
+                request_payload=request_payload,
+                api_version=api_version,
+                content_type=content_type,
+                cls=lambda x, y, z: x,
+                headers=_headers,
+                params=_params,
+                **kwargs
+            )
+            # Drain the streamed body so the connection is released before polling.
+            raw_result.http_response.read()  # type: ignore
+        kwargs.pop("error_map", None)
+
+        def get_long_running_output(pipeline_response):
+            # Unlike the void LROs above, this operation returns a payload:
+            # deserialize the final response into a RunCommandResult model.
+            deserialized = self._deserialize("RunCommandResult", pipeline_response.http_response)
+            if cls:
+                return cls(pipeline_response, deserialized, {})  # type: ignore
+            return deserialized
+
+        if polling is True:
+            polling_method: PollingMethod = cast(
+                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
+            )
+        elif polling is False:
+            polling_method = cast(PollingMethod, NoPolling())
+        else:
+            polling_method = polling
+        if cont_token:
+            return LROPoller[_models.RunCommandResult].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return LROPoller[_models.RunCommandResult](
+            self._client, raw_result, get_long_running_output, polling_method  # type: ignore
+        )
+
+ @distributed_trace
+ def get_command_result(
+ self, resource_group_name: str, resource_name: str, command_id: str, **kwargs: Any
+ ) -> Optional[_models.RunCommandResult]:
+ """Gets the results of a command which has been run on the Managed Cluster.
+
+ Gets the results of a command which has been run on the Managed Cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param command_id: Id of the command. Required.
+ :type command_id: str
+ :return: RunCommandResult or None or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.RunCommandResult or None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[Optional[_models.RunCommandResult]] = kwargs.pop("cls", None)
+
+ _request = build_get_command_result_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ command_id=command_id,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = None
+ response_headers = {}
+ if response.status_code == 200:
+ deserialized = self._deserialize("RunCommandResult", pipeline_response.http_response)
+
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace
+ def list_outbound_network_dependencies_endpoints( # pylint: disable=name-too-long
+ self, resource_group_name: str, resource_name: str, **kwargs: Any
+ ) -> ItemPaged["_models.OutboundEnvironmentEndpoint"]:
+ """Gets a list of egress endpoints (network endpoints of all outbound dependencies) in the
+ specified managed cluster.
+
+ Gets a list of egress endpoints (network endpoints of all outbound dependencies) in the
+ specified managed cluster. The operation returns properties of each egress endpoint.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :return: An iterator like instance of either OutboundEnvironmentEndpoint or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.OutboundEnvironmentEndpoint]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.OutboundEnvironmentEndpointCollection] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_outbound_network_dependencies_endpoints_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("OutboundEnvironmentEndpointCollection", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ @distributed_trace
+ def list_mesh_revision_profiles(self, location: str, **kwargs: Any) -> ItemPaged["_models.MeshRevisionProfile"]:
+ """Lists mesh revision profiles for all meshes in the specified location.
+
+ Contains extra metadata on each revision, including supported revisions, cluster compatibility
+ and available upgrades.
+
+ :param location: The name of the Azure region. Required.
+ :type location: str
+ :return: An iterator like instance of either MeshRevisionProfile or the result of cls(response)
+ :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.MeshRevisionProfile]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.MeshRevisionProfileList] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_mesh_revision_profiles_request(
+ location=location,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("MeshRevisionProfileList", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ @distributed_trace
+ def get_mesh_revision_profile(self, location: str, mode: str, **kwargs: Any) -> _models.MeshRevisionProfile:
+ """Gets a mesh revision profile for a specified mesh in the specified location.
+
+ Contains extra metadata on the revision, including supported revisions, cluster compatibility
+ and available upgrades.
+
+ :param location: The name of the Azure region. Required.
+ :type location: str
+ :param mode: The mode of the mesh. Required.
+ :type mode: str
+ :return: MeshRevisionProfile or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.MeshRevisionProfile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.MeshRevisionProfile] = kwargs.pop("cls", None)
+
+ _request = build_get_mesh_revision_profile_request(
+ location=location,
+ mode=mode,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("MeshRevisionProfile", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace
+ def list_mesh_upgrade_profiles(
+ self, resource_group_name: str, resource_name: str, **kwargs: Any
+ ) -> ItemPaged["_models.MeshUpgradeProfile"]:
+ """Lists available upgrades for all service meshes in a specific cluster.
+
+ Lists available upgrades for all service meshes in a specific cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :return: An iterator like instance of either MeshUpgradeProfile or the result of cls(response)
+ :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.MeshUpgradeProfile]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.MeshUpgradeProfileList] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_mesh_upgrade_profiles_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("MeshUpgradeProfileList", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(get_next, extract_data)
+
+ @distributed_trace
+ def get_mesh_upgrade_profile(
+ self, resource_group_name: str, resource_name: str, mode: str, **kwargs: Any
+ ) -> _models.MeshUpgradeProfile:
+ """Gets available upgrades for a service mesh in a cluster.
+
+ Gets available upgrades for a service mesh in a cluster.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param mode: The mode of the mesh. Required.
+ :type mode: str
+ :return: MeshUpgradeProfile or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.MeshUpgradeProfile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.MeshUpgradeProfile] = kwargs.pop("cls", None)
+
+ _request = build_get_mesh_upgrade_profile_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ mode=mode,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("MeshUpgradeProfile", pipeline_response.http_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_managed_namespaces_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_managed_namespaces_operations.py
new file mode 100644
index 00000000000..8dfe5b99e86
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_managed_namespaces_operations.py
@@ -0,0 +1,1086 @@
+# pylint: disable=line-too-long,useless-suppression,too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from io import IOBase
+from typing import Any, Callable, IO, Iterator, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._configuration import ContainerServiceClientConfiguration
+from .._utils.serialization import Deserializer, Serializer
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_by_managed_cluster_request(
+ resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/managedNamespaces",
+ )
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "resourceName": _SERIALIZER.url(
+ "resource_name",
+ resource_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(
+ resource_group_name: str, resource_name: str, managed_namespace_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/managedNamespaces/{managedNamespaceName}",
+ )
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "resourceName": _SERIALIZER.url(
+ "resource_name",
+ resource_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+ ),
+ "managedNamespaceName": _SERIALIZER.url(
+ "managed_namespace_name",
+ managed_namespace_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"[a-z0-9]([-a-z0-9]*[a-z0-9])?",
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+ resource_group_name: str, resource_name: str, managed_namespace_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/managedNamespaces/{managedNamespaceName}",
+ )
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "resourceName": _SERIALIZER.url(
+ "resource_name",
+ resource_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+ ),
+ "managedNamespaceName": _SERIALIZER.url(
+ "managed_namespace_name",
+ managed_namespace_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"[a-z0-9]([-a-z0-9]*[a-z0-9])?",
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+ resource_group_name: str, resource_name: str, managed_namespace_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/managedNamespaces/{managedNamespaceName}",
+ )
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "resourceName": _SERIALIZER.url(
+ "resource_name",
+ resource_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+ ),
+ "managedNamespaceName": _SERIALIZER.url(
+ "managed_namespace_name",
+ managed_namespace_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"[a-z0-9]([-a-z0-9]*[a-z0-9])?",
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_update_request(
+ resource_group_name: str, resource_name: str, managed_namespace_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/managedNamespaces/{managedNamespaceName}",
+ )
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "resourceName": _SERIALIZER.url(
+ "resource_name",
+ resource_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+ ),
+ "managedNamespaceName": _SERIALIZER.url(
+ "managed_namespace_name",
+ managed_namespace_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"[a-z0-9]([-a-z0-9]*[a-z0-9])?",
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_credential_request(
+ resource_group_name: str, resource_name: str, managed_namespace_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/managedNamespaces/{managedNamespaceName}/listCredential",
+ )
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "resourceName": _SERIALIZER.url(
+ "resource_name",
+ resource_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+ ),
+ "managedNamespaceName": _SERIALIZER.url(
+ "managed_namespace_name",
+ managed_namespace_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"[a-z0-9]([-a-z0-9]*[a-z0-9])?",
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
class ManagedNamespacesOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

    Instead, you should access the following operations through
    :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s
    :attr:`managed_namespaces` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # The generated client injects these four collaborators either positionally
        # or by keyword; consume positionals first, then fall back to kwargs.
        input_args = list(args)
        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list_by_managed_cluster(
        self, resource_group_name: str, resource_name: str, **kwargs: Any
    ) -> ItemPaged["_models.ManagedNamespace"]:
        """Gets a list of managed namespaces in the specified managed cluster.

        Gets a list of managed namespaces in the specified managed cluster.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :return: An iterator like instance of either ManagedNamespace or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.ManagedNamespace]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.ManagedNamespaceListResult] = kwargs.pop("cls", None)

        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the canonical list request.
            if not next_link:

                _request = build_list_by_managed_cluster_request(
                    resource_group_name=resource_group_name,
                    resource_name=resource_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    headers=_headers,
                    params=_params,
                )
                _request.url = self._client.format_url(_request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                _request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                _request.url = self._client.format_url(_request.url)
                _request.method = "GET"
            return _request

        def extract_data(pipeline_response):
            deserialized = self._deserialize("ManagedNamespaceListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            # Paging stops when the service stops returning a nextLink.
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            _request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                _request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(
                    _models.ErrorResponse,
                    pipeline_response,
                )
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    @distributed_trace
    def get(
        self, resource_group_name: str, resource_name: str, managed_namespace_name: str, **kwargs: Any
    ) -> _models.ManagedNamespace:
        """Gets the specified namespace of a managed cluster.

        Gets the specified namespace of a managed cluster.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param managed_namespace_name: The name of the managed namespace. Required.
        :type managed_namespace_name: str
        :return: ManagedNamespace or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.ManagedNamespace
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.ManagedNamespace] = kwargs.pop("cls", None)

        _request = build_get_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            managed_namespace_name=managed_namespace_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("ManagedNamespace", pipeline_response.http_response)

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    def _create_or_update_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        managed_namespace_name: str,
        parameters: Union[_models.ManagedNamespace, IO[bytes]],
        **kwargs: Any
    ) -> Iterator[bytes]:
        # Initial PUT of the create-or-update LRO; ``begin_create_or_update`` wraps this
        # with a poller, so this returns the raw streamed body rather than a model.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # A file-like/bytes body is sent as-is; a model body is serialized to JSON.
        if isinstance(parameters, (IOBase, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "ManagedNamespace")

        _request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            managed_namespace_name=managed_namespace_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _decompress = kwargs.pop("decompress", True)
        # Stream the initial response; the caller decides when to read the body.
        _stream = True
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            try:
                response.read()  # Load the body in memory and close the socket
            except (StreamConsumedError, StreamClosedError):
                pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        response_headers = {}
        # Azure-AsyncOperation carries the URL the poller uses to track the operation.
        response_headers["Azure-AsyncOperation"] = self._deserialize(
            "str", response.headers.get("Azure-AsyncOperation")
        )

        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)  # type: ignore

        return deserialized  # type: ignore

    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        managed_namespace_name: str,
        parameters: _models.ManagedNamespace,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.ManagedNamespace]:
        """Creates or updates a namespace managed by ARM for the specified managed cluster. Users can
        configure aspects like resource quotas, network ingress/egress policies, and more. See
        aka.ms/aks/managed-namespaces for more details.

        Creates or updates a namespace managed by ARM for the specified managed cluster. Users can
        configure aspects like resource quotas, network ingress/egress policies, and more. See
        aka.ms/aks/managed-namespaces for more details.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param managed_namespace_name: The name of the managed namespace. Required.
        :type managed_namespace_name: str
        :param parameters: The namespace to create or update. Required.
        :type parameters: ~azure.mgmt.containerservice.models.ManagedNamespace
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of LROPoller that returns either ManagedNamespace or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedNamespace]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        managed_namespace_name: str,
        parameters: IO[bytes],
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.ManagedNamespace]:
        """Creates or updates a namespace managed by ARM for the specified managed cluster. Users can
        configure aspects like resource quotas, network ingress/egress policies, and more. See
        aka.ms/aks/managed-namespaces for more details.

        Creates or updates a namespace managed by ARM for the specified managed cluster. Users can
        configure aspects like resource quotas, network ingress/egress policies, and more. See
        aka.ms/aks/managed-namespaces for more details.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param managed_namespace_name: The name of the managed namespace. Required.
        :type managed_namespace_name: str
        :param parameters: The namespace to create or update. Required.
        :type parameters: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: An instance of LROPoller that returns either ManagedNamespace or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedNamespace]
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        managed_namespace_name: str,
        parameters: Union[_models.ManagedNamespace, IO[bytes]],
        **kwargs: Any
    ) -> LROPoller[_models.ManagedNamespace]:
        """Creates or updates a namespace managed by ARM for the specified managed cluster. Users can
        configure aspects like resource quotas, network ingress/egress policies, and more. See
        aka.ms/aks/managed-namespaces for more details.

        Creates or updates a namespace managed by ARM for the specified managed cluster. Users can
        configure aspects like resource quotas, network ingress/egress policies, and more. See
        aka.ms/aks/managed-namespaces for more details.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param managed_namespace_name: The name of the managed namespace. Required.
        :type managed_namespace_name: str
        :param parameters: The namespace to create or update. Is either a ManagedNamespace type or a
         IO[bytes] type. Required.
        :type parameters: ~azure.mgmt.containerservice.models.ManagedNamespace or IO[bytes]
        :return: An instance of LROPoller that returns either ManagedNamespace or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.ManagedNamespace]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.ManagedNamespace] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # With a continuation token, the initial request is skipped and the poller
        # resumes from the token instead.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                managed_namespace_name=managed_namespace_name,
                parameters=parameters,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
            raw_result.http_response.read()  # type: ignore
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            response_headers = {}
            response = pipeline_response.http_response
            response_headers["Azure-AsyncOperation"] = self._deserialize(
                "str", response.headers.get("Azure-AsyncOperation")
            )

            deserialized = self._deserialize("ManagedNamespace", pipeline_response.http_response)
            if cls:
                return cls(pipeline_response, deserialized, response_headers)  # type: ignore
            return deserialized

        if polling is True:
            polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller[_models.ManagedNamespace].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller[_models.ManagedNamespace](
            self._client, raw_result, get_long_running_output, polling_method  # type: ignore
        )

    def _delete_initial(
        self, resource_group_name: str, resource_name: str, managed_namespace_name: str, **kwargs: Any
    ) -> Iterator[bytes]:
        # Initial DELETE of the delete LRO; ``begin_delete`` wraps this with a poller.
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)

        _request = build_delete_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            managed_namespace_name=managed_namespace_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _decompress = kwargs.pop("decompress", True)
        # Stream the initial response; the caller decides when to read the body.
        _stream = True
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        # 202 = deletion accepted and in progress; 204 = nothing to delete / already gone.
        if response.status_code not in [202, 204]:
            try:
                response.read()  # Load the body in memory and close the socket
            except (StreamConsumedError, StreamClosedError):
                pass
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        response_headers = {}
        if response.status_code == 202:
            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
            response_headers["Azure-AsyncOperation"] = self._deserialize(
                "str", response.headers.get("Azure-AsyncOperation")
            )

        if response.status_code == 204:
            response_headers["Azure-AsyncOperation"] = self._deserialize(
                "str", response.headers.get("Azure-AsyncOperation")
            )

        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace
    def begin_delete(
        self, resource_group_name: str, resource_name: str, managed_namespace_name: str, **kwargs: Any
    ) -> LROPoller[None]:
        """Deletes a namespace.

        Deletes a namespace.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param managed_namespace_name: The name of the managed namespace. Required.
        :type managed_namespace_name: str
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # With a continuation token, the initial request is skipped and the poller
        # resumes from the token instead.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                managed_namespace_name=managed_namespace_name,
                api_version=api_version,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
            raw_result.http_response.read()  # type: ignore
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # Delete produces no body; only invoke the custom callback if provided.
            if cls:
                return cls(pipeline_response, None, {})  # type: ignore

        if polling is True:
            polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller[None].from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

    @overload
    def update(
        self,
        resource_group_name: str,
        resource_name: str,
        managed_namespace_name: str,
        parameters: _models.TagsObject,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.ManagedNamespace:
        """Updates tags on a managed namespace.

        Updates tags on a managed namespace.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param managed_namespace_name: The name of the managed namespace. Required.
        :type managed_namespace_name: str
        :param parameters: Parameters supplied to the patch namespace operation, we only support patch
         tags for now. Required.
        :type parameters: ~azure.mgmt.containerservice.models.TagsObject
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: ManagedNamespace or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.ManagedNamespace
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def update(
        self,
        resource_group_name: str,
        resource_name: str,
        managed_namespace_name: str,
        parameters: IO[bytes],
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.ManagedNamespace:
        """Updates tags on a managed namespace.

        Updates tags on a managed namespace.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param managed_namespace_name: The name of the managed namespace. Required.
        :type managed_namespace_name: str
        :param parameters: Parameters supplied to the patch namespace operation, we only support patch
         tags for now. Required.
        :type parameters: IO[bytes]
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: ManagedNamespace or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.ManagedNamespace
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def update(
        self,
        resource_group_name: str,
        resource_name: str,
        managed_namespace_name: str,
        parameters: Union[_models.TagsObject, IO[bytes]],
        **kwargs: Any
    ) -> _models.ManagedNamespace:
        """Updates tags on a managed namespace.

        Updates tags on a managed namespace.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param managed_namespace_name: The name of the managed namespace. Required.
        :type managed_namespace_name: str
        :param parameters: Parameters supplied to the patch namespace operation, we only support patch
         tags for now. Is either a TagsObject type or a IO[bytes] type. Required.
        :type parameters: ~azure.mgmt.containerservice.models.TagsObject or IO[bytes]
        :return: ManagedNamespace or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.ManagedNamespace
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.ManagedNamespace] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # A file-like/bytes body is sent as-is; a model body is serialized to JSON.
        if isinstance(parameters, (IOBase, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "TagsObject")

        _request = build_update_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            managed_namespace_name=managed_namespace_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("ManagedNamespace", pipeline_response.http_response)

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    @distributed_trace
    def list_credential(
        self, resource_group_name: str, resource_name: str, managed_namespace_name: str, **kwargs: Any
    ) -> _models.CredentialResults:
        """Lists the credentials of a namespace.

        Lists the credentials of a namespace.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource. Required.
        :type resource_name: str
        :param managed_namespace_name: The name of the managed namespace. Required.
        :type managed_namespace_name: str
        :return: CredentialResults or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.models.CredentialResults
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.CredentialResults] = kwargs.pop("cls", None)

        _request = build_list_credential_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            managed_namespace_name=managed_namespace_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            headers=_headers,
            params=_params,
        )
        _request.url = self._client.format_url(_request.url)

        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            _request, stream=_stream, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(
                _models.ErrorResponse,
                pipeline_response,
            )
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("CredentialResults", pipeline_response.http_response)

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_operations.py
new file mode 100644
index 00000000000..204312518ba
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_operations.py
@@ -0,0 +1,155 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from typing import Any, Callable, Optional, TypeVar
+import urllib.parse
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from .. import models as _models
+from .._configuration import ContainerServiceClientConfiguration
+from .._utils.serialization import Deserializer, Serializer
+
# Type of the optional ``cls`` callback callers may pass to customize deserialization:
# it receives the pipeline response, the deserialized body, and the response headers.
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]]
# Alias emitted by the code generator — presumably for backward compatibility with
# older generated code that referenced ``List``; do not remove. TODO confirm on regen.
List = list

# Module-level serializer used by the request-builder functions below.
# Client-side validation is disabled: the service performs authoritative validation.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
+
+
def build_list_request(**kwargs: Any) -> HttpRequest:
    """Build the GET request for listing all Microsoft.ContainerService provider operations.

    :keyword api_version: Overrides the default service API version ("2025-10-01").
    :keyword template_url: Overrides the default URL template (used by paging/testing).
    :return: An :class:`~azure.core.rest.HttpRequest` ready to be sent by the pipeline.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # "api-version" may arrive either as a kwarg or as a pre-set query parameter.
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL — tenant-wide endpoint, so no subscription/resource-group segments.
    _url = kwargs.pop("template_url", "/providers/Microsoft.ContainerService/operations")

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

    Instead, you should access the following operations through
    :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s
    :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # The generated client injects these four collaborators either positionally
        # or by keyword; consume positionals first, then fall back to kwargs.
        input_args = list(args)
        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(self, **kwargs: Any) -> ItemPaged["_models.OperationValue"]:
        """Gets a list of operations.

        Gets a list of operations.

        :return: An iterator like instance of either OperationValue or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.OperationValue]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)

        error_map: MutableMapping = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the canonical list request.
            if not next_link:

                _request = build_list_request(
                    api_version=api_version,
                    headers=_headers,
                    params=_params,
                )
                _request.url = self._client.format_url(_request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                _request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                _request.url = self._client.format_url(_request.url)
                _request.method = "GET"
            return _request

        def extract_data(pipeline_response):
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            # OperationListResult carries no nextLink, so always report "no more pages".
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            _request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                _request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(
                    _models.ErrorResponse,
                    pipeline_response,
                )
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_patch.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_patch.py
new file mode 100644
index 00000000000..8bcb627aa47
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_patch.py
@@ -0,0 +1,21 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():  # intentionally empty: hand-written customizations would go here
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_private_endpoint_connections_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_private_endpoint_connections_operations.py
new file mode 100644
index 00000000000..a0e8fc786f9
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_private_endpoint_connections_operations.py
@@ -0,0 +1,641 @@
+# pylint: disable=line-too-long,useless-suppression
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from io import IOBase
+from typing import Any, Callable, IO, Iterator, Optional, TypeVar, Union, cast, overload
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._configuration import ContainerServiceClientConfiguration
+from .._utils.serialization import Deserializer, Serializer
+
+T = TypeVar("T")  # generic result type for the optional `cls` response hook
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]]  # signature of the user-supplied response transformer
+List = list  # NOTE(review): alias of the builtin; presumably kept for generated `List[...]` annotations — confirm on regen
+
+_SERIALIZER = Serializer()  # module-level serializer shared by the request builders below
+_SERIALIZER.client_side_validation = False  # skip client-side parameter validation
+
+
+def build_list_request(
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))  # caller override wins over the default service version
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL: ARM resource-id template; segments are validated/escaped via _SERIALIZER.url
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct query parameters (api-version is required on every ARM call)
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers (JSON responses only)
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(
+    resource_group_name: str,
+    resource_name: str,
+    private_endpoint_connection_name: str,
+    subscription_id: str,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))  # caller override wins over the default service version
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL: ARM resource-id template; segments are validated/escaped via _SERIALIZER.url
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+        "privateEndpointConnectionName": _SERIALIZER.url(
+            "private_endpoint_connection_name", private_endpoint_connection_name, "str"
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct query parameters (api-version is required on every ARM call)
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers (JSON responses only)
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_update_request(
+    resource_group_name: str,
+    resource_name: str,
+    private_endpoint_connection_name: str,
+    subscription_id: str,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))  # caller override wins over the default service version
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL: ARM resource-id template; segments are validated/escaped via _SERIALIZER.url
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+        "privateEndpointConnectionName": _SERIALIZER.url(
+            "private_endpoint_connection_name", private_endpoint_connection_name, "str"
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct query parameters (api-version is required on every ARM call)
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers; Content-Type is only set when the caller supplies a body media type
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+    resource_group_name: str,
+    resource_name: str,
+    private_endpoint_connection_name: str,
+    subscription_id: str,
+    **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))  # caller override wins over the default service version
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL: ARM resource-id template; segments are validated/escaped via _SERIALIZER.url
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+        "privateEndpointConnectionName": _SERIALIZER.url(
+            "private_endpoint_connection_name", private_endpoint_connection_name, "str"
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct query parameters (api-version is required on every ARM call)
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers (JSON error responses only; success bodies may be empty)
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class PrivateEndpointConnectionsOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+    Instead, you should access the following operations through
+    :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s
+    :attr:`private_endpoint_connections` attribute.
+    """
+
+    models = _models  # expose the generated models module on the operations class
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)  # client, config, serializer, deserializer may arrive positionally or by keyword
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def list(
+        self, resource_group_name: str, resource_name: str, **kwargs: Any
+    ) -> _models.PrivateEndpointConnectionListResult:
+        """Gets a list of private endpoint connections in the specified managed cluster.
+
+        To learn more about private clusters, see:
+        https://docs.microsoft.com/azure/aks/private-clusters.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :return: PrivateEndpointConnectionListResult or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnectionListResult
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {  # map well-known status codes onto typed azure-core exceptions
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.PrivateEndpointConnectionListResult] = kwargs.pop("cls", None)
+
+        _request = build_list_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False  # buffer the full response body before deserializing
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def get(
+        self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
+    ) -> _models.PrivateEndpointConnection:
+        """Gets the specified private endpoint connection.
+
+        To learn more about private clusters, see:
+        https://docs.microsoft.com/azure/aks/private-clusters.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param private_endpoint_connection_name: The name of the private endpoint connection. Required.
+        :type private_endpoint_connection_name: str
+        :return: PrivateEndpointConnection or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnection
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {  # map well-known status codes onto typed azure-core exceptions
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
+
+        _request = build_get_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            private_endpoint_connection_name=private_endpoint_connection_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False  # buffer the full response body before deserializing
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @overload
+    def update(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        private_endpoint_connection_name: str,
+        parameters: _models.PrivateEndpointConnection,
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> _models.PrivateEndpointConnection:
+        """Updates a private endpoint connection.
+
+        Updates a private endpoint connection.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param private_endpoint_connection_name: The name of the private endpoint connection. Required.
+        :type private_endpoint_connection_name: str
+        :param parameters: The updated private endpoint connection. Required.
+        :type parameters: ~azure.mgmt.containerservice.models.PrivateEndpointConnection
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: PrivateEndpointConnection or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnection
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def update(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        private_endpoint_connection_name: str,
+        parameters: IO[bytes],
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> _models.PrivateEndpointConnection:
+        """Updates a private endpoint connection.
+
+        Updates a private endpoint connection.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param private_endpoint_connection_name: The name of the private endpoint connection. Required.
+        :type private_endpoint_connection_name: str
+        :param parameters: The updated private endpoint connection. Required.
+        :type parameters: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: PrivateEndpointConnection or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnection
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def update(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        private_endpoint_connection_name: str,
+        parameters: Union[_models.PrivateEndpointConnection, IO[bytes]],
+        **kwargs: Any
+    ) -> _models.PrivateEndpointConnection:
+        """Updates a private endpoint connection.
+
+        Updates a private endpoint connection.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param private_endpoint_connection_name: The name of the private endpoint connection. Required.
+        :type private_endpoint_connection_name: str
+        :param parameters: The updated private endpoint connection. Is either a
+         PrivateEndpointConnection type or a IO[bytes] type. Required.
+        :type parameters: ~azure.mgmt.containerservice.models.PrivateEndpointConnection or IO[bytes]
+        :return: PrivateEndpointConnection or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.PrivateEndpointConnection
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {  # map well-known status codes onto typed azure-core exceptions
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
+
+        content_type = content_type or "application/json"  # default body media type
+        _json = None
+        _content = None
+        if isinstance(parameters, (IOBase, bytes)):  # raw stream/bytes payloads are sent as-is, unserialized
+            _content = parameters
+        else:
+            _json = self._serialize.body(parameters, "PrivateEndpointConnection")
+
+        _request = build_update_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            private_endpoint_connection_name=private_endpoint_connection_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            content_type=content_type,
+            json=_json,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False  # buffer the full response body before deserializing
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    def _delete_initial(
+        self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
+    ) -> Iterator[bytes]:  # initial DELETE call of the LRO; returns the streamed response body
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+        _request = build_delete_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            private_endpoint_connection_name=private_endpoint_connection_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _decompress = kwargs.pop("decompress", True)
+        _stream = True  # stream the body; the LRO machinery consumes it later
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 204]:
+            try:
+                response.read()  # Load the body in memory and close the socket
+            except (StreamConsumedError, StreamClosedError):
+                pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def begin_delete(
+        self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
+    ) -> LROPoller[None]:
+        """Deletes a private endpoint connection.
+
+        Deletes a private endpoint connection.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param private_endpoint_connection_name: The name of the private endpoint connection. Required.
+        :type private_endpoint_connection_name: str
+        :return: An instance of LROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.LROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+        if cont_token is None:
+            raw_result = self._delete_initial(
+                resource_group_name=resource_group_name,
+                resource_name=resource_name,
+                private_endpoint_connection_name=private_endpoint_connection_name,
+                api_version=api_version,
+                cls=lambda x, y, z: x,  # keep the raw PipelineResponse so the poller can read LRO headers
+                headers=_headers,
+                params=_params,
+                **kwargs
+            )
+            raw_result.http_response.read()  # type: ignore
+        kwargs.pop("error_map", None)  # already consumed by the initial call
+
+        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
+            if cls:
+                return cls(pipeline_response, None, {})  # type: ignore
+
+        if polling is True:
+            polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
+        elif polling is False:
+            polling_method = cast(PollingMethod, NoPolling())
+        else:
+            polling_method = polling
+        if cont_token:
+            return LROPoller[None].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_private_link_resources_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_private_link_resources_operations.py
new file mode 100644
index 00000000000..88a56faf5fa
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_private_link_resources_operations.py
@@ -0,0 +1,160 @@
+# pylint: disable=line-too-long,useless-suppression
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from typing import Any, Callable, Optional, TypeVar
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from .. import models as _models
+from .._configuration import ContainerServiceClientConfiguration
+from .._utils.serialization import Deserializer, Serializer
+
+T = TypeVar("T")  # generic result type for the optional `cls` response hook
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]]  # signature of the user-supplied response transformer
+List = list  # NOTE(review): alias of the builtin; presumably kept for generated `List[...]` annotations — confirm on regen
+
+_SERIALIZER = Serializer()  # module-level serializer shared by the request builders below
+_SERIALIZER.client_side_validation = False  # skip client-side parameter validation
+
+
+def build_list_request(
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))  # caller override wins over the default service version
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL: ARM resource-id template; segments are validated/escaped via _SERIALIZER.url
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct query parameters (api-version is required on every ARM call)
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers (JSON responses only)
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class PrivateLinkResourcesOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+    Instead, you should access the following operations through
+    :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s
+    :attr:`private_link_resources` attribute.
+    """
+
+    models = _models  # expose the generated models module on the operations class
+
+    def __init__(self, *args, **kwargs) -> None:
+        input_args = list(args)  # client, config, serializer, deserializer may arrive positionally or by keyword
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def list(
+        self, resource_group_name: str, resource_name: str, **kwargs: Any
+    ) -> _models.PrivateLinkResourcesListResult:
+        """Gets a list of private link resources in the specified managed cluster.
+
+        To learn more about private clusters, see:
+        https://docs.microsoft.com/azure/aks/private-clusters.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :return: PrivateLinkResourcesListResult or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.PrivateLinkResourcesListResult
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {  # map well-known status codes onto typed azure-core exceptions
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.PrivateLinkResourcesListResult] = kwargs.pop("cls", None)
+
+        _request = build_list_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False  # buffer the full response body before deserializing
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("PrivateLinkResourcesListResult", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_resolve_private_link_service_id_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_resolve_private_link_service_id_operations.py
new file mode 100644
index 00000000000..f8b8ab7cfd1
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_resolve_private_link_service_id_operations.py
@@ -0,0 +1,240 @@
+# pylint: disable=line-too-long,useless-suppression
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from io import IOBase
+from typing import Any, Callable, IO, Optional, TypeVar, Union, overload
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from .. import models as _models
+from .._configuration import ContainerServiceClientConfiguration
+from .._utils.serialization import Deserializer, Serializer
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_post_request(
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:  # pure request construction for POST resolvePrivateLinkServiceId; performs no I/O
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))  # caller override wins over module default
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL (each path argument is validated and encoded by the serializer)
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resolvePrivateLinkServiceId",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",  # ARM naming rule for managed cluster resources
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct query parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers (Content-Type is only emitted when a body content type was supplied)
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class ResolvePrivateLinkServiceIdOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s
+        :attr:`resolve_private_link_service_id` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:  # wired by the generated client; accepts positional or keyword form
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @overload
+    def post(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        parameters: _models.PrivateLinkResource,  # model overload: body is serialized to JSON
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> _models.PrivateLinkResource:
+        """Gets the private link service ID for the specified managed cluster.
+
+        Gets the private link service ID for the specified managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param parameters: Parameters required in order to resolve a private link service ID. Required.
+        :type parameters: ~azure.mgmt.containerservice.models.PrivateLinkResource
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: PrivateLinkResource or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.PrivateLinkResource
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def post(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        parameters: IO[bytes],  # raw-stream overload: body is passed through unmodified
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> _models.PrivateLinkResource:
+        """Gets the private link service ID for the specified managed cluster.
+
+        Gets the private link service ID for the specified managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param parameters: Parameters required in order to resolve a private link service ID. Required.
+        :type parameters: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: PrivateLinkResource or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.PrivateLinkResource
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def post(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        parameters: Union[_models.PrivateLinkResource, IO[bytes]],
+        **kwargs: Any
+    ) -> _models.PrivateLinkResource:
+        """Gets the private link service ID for the specified managed cluster.
+
+        Gets the private link service ID for the specified managed cluster.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param parameters: Parameters required in order to resolve a private link service ID. Is either
+         a PrivateLinkResource type or a IO[bytes] type. Required.
+        :type parameters: ~azure.mgmt.containerservice.models.PrivateLinkResource or IO[bytes]
+        :return: PrivateLinkResource or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.PrivateLinkResource
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {  # maps HTTP status to azure-core exception types; caller may extend via error_map kwarg
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models.PrivateLinkResource] = kwargs.pop("cls", None)
+
+        content_type = content_type or "application/json"
+        _json = None
+        _content = None
+        if isinstance(parameters, (IOBase, bytes)):
+            _content = parameters  # already-serialized body: send bytes/stream as-is
+        else:
+            _json = self._serialize.body(parameters, "PrivateLinkResource")  # serialize the model to JSON
+
+        _request = build_post_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            content_type=content_type,
+            json=_json,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False  # response body is fully buffered, not streamed
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(  # best-effort parse of the ARM error body
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("PrivateLinkResource", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_snapshots_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_snapshots_operations.py
new file mode 100644
index 00000000000..805ed298bed
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_snapshots_operations.py
@@ -0,0 +1,841 @@
+# pylint: disable=line-too-long,useless-suppression
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from io import IOBase
+from typing import Any, Callable, IO, Optional, TypeVar, Union, overload
+import urllib.parse
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from .. import models as _models
+from .._configuration import ContainerServiceClientConfiguration
+from .._utils.serialization import Deserializer, Serializer
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest:  # GET all snapshots in a subscription; no I/O
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))  # caller override wins
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL (subscription-scoped list endpoint)
+    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/snapshots")
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct query parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_list_by_resource_group_request(resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:  # GET snapshots scoped to one resource group; no I/O
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))  # caller override wins
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL (resource-group-scoped list endpoint)
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct query parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_get_request(resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:  # GET a single snapshot by name; no I/O
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))  # caller override wins
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL (each path argument is validated and encoded by the serializer)
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",  # ARM naming rule for the resource
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct query parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_create_or_update_request(
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:  # PUT (create-or-replace) a snapshot; no I/O
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))  # caller override wins
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL (each path argument is validated and encoded by the serializer)
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",  # ARM naming rule for the resource
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct query parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers (Content-Type is only emitted when a body content type was supplied)
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_update_tags_request(
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:  # PATCH only the tags of a snapshot; no I/O
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))  # caller override wins
+    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL (each path argument is validated and encoded by the serializer)
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",  # ARM naming rule for the resource
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct query parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers (Content-Type is only emitted when a body content type was supplied)
+    if content_type is not None:
+        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+def build_delete_request(
+    resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+) -> HttpRequest:  # DELETE a snapshot; no body, so no Content-Type handling; no I/O
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))  # caller override wins
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL (each path argument is validated and encoded by the serializer)
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "resourceGroupName": _SERIALIZER.url(
+            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+        ),
+        "resourceName": _SERIALIZER.url(
+            "resource_name",
+            resource_name,
+            "str",
+            max_length=63,
+            min_length=1,
+            pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",  # ARM naming rule for the resource
+        ),
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct query parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class SnapshotsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s
+ :attr:`snapshots` attribute.
+ """
+
+ models = _models
+
+    def __init__(self, *args, **kwargs) -> None:  # wired by the generated client; accepts positional or keyword form
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def list(self, **kwargs: Any) -> ItemPaged["_models.Snapshot"]:
+        """Gets a list of snapshots in the specified subscription.
+
+        Gets a list of snapshots in the specified subscription.
+
+        :return: An iterator like instance of either Snapshot or the result of cls(response)
+        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.Snapshot]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.SnapshotListResult] = kwargs.pop("cls", None)
+
+        error_map: MutableMapping = {  # maps HTTP status to azure-core exception types; caller may extend
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        def prepare_request(next_link=None):  # first page uses the builder; later pages replay the service's nextLink
+            if not next_link:
+
+                _request = build_list_request(
+                    subscription_id=self._config.subscription_id,
+                    api_version=api_version,
+                    headers=_headers,
+                    params=_params,
+                )
+                _request.url = self._client.format_url(_request.url)
+
+            else:
+                # make call to next link with the client's api-version
+                _parsed_next_link = urllib.parse.urlparse(next_link)
+                _next_request_params = case_insensitive_dict(
+                    {
+                        key: [urllib.parse.quote(v) for v in value]  # re-quote query values from the raw nextLink
+                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+                    }
+                )
+                _next_request_params["api-version"] = self._config.api_version
+                _request = HttpRequest(
+                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+                )
+                _request.url = self._client.format_url(_request.url)
+                _request.method = "GET"
+            return _request
+
+        def extract_data(pipeline_response):  # pulls one page's items plus the continuation link
+            deserialized = self._deserialize("SnapshotListResult", pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)  # type: ignore
+            return deserialized.next_link or None, iter(list_of_elem)
+
+        def get_next(next_link=None):  # runs one page request through the pipeline and checks the status
+            _request = prepare_request(next_link)
+
+            _stream = False
+            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+                _request, stream=_stream, **kwargs
+            )
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                error = self._deserialize.failsafe_deserialize(
+                    _models.ErrorResponse,
+                    pipeline_response,
+                )
+                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        return ItemPaged(get_next, extract_data)
+
+    @distributed_trace
+    def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> ItemPaged["_models.Snapshot"]:
+        """Lists snapshots in the specified subscription and resource group.
+
+        Lists snapshots in the specified subscription and resource group.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :return: An iterator like instance of either Snapshot or the result of cls(response)
+        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.Snapshot]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.SnapshotListResult] = kwargs.pop("cls", None)
+
+        error_map: MutableMapping = {  # maps HTTP status to azure-core exception types; caller may extend
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        def prepare_request(next_link=None):  # first page uses the builder; later pages replay the service's nextLink
+            if not next_link:
+
+                _request = build_list_by_resource_group_request(
+                    resource_group_name=resource_group_name,
+                    subscription_id=self._config.subscription_id,
+                    api_version=api_version,
+                    headers=_headers,
+                    params=_params,
+                )
+                _request.url = self._client.format_url(_request.url)
+
+            else:
+                # make call to next link with the client's api-version
+                _parsed_next_link = urllib.parse.urlparse(next_link)
+                _next_request_params = case_insensitive_dict(
+                    {
+                        key: [urllib.parse.quote(v) for v in value]  # re-quote query values from the raw nextLink
+                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+                    }
+                )
+                _next_request_params["api-version"] = self._config.api_version
+                _request = HttpRequest(
+                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+                )
+                _request.url = self._client.format_url(_request.url)
+                _request.method = "GET"
+            return _request
+
+        def extract_data(pipeline_response):  # pulls one page's items plus the continuation link
+            deserialized = self._deserialize("SnapshotListResult", pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)  # type: ignore
+            return deserialized.next_link or None, iter(list_of_elem)
+
+        def get_next(next_link=None):  # runs one page request through the pipeline and checks the status
+            _request = prepare_request(next_link)
+
+            _stream = False
+            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+                _request, stream=_stream, **kwargs
+            )
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                error = self._deserialize.failsafe_deserialize(
+                    _models.ErrorResponse,
+                    pipeline_response,
+                )
+                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        return ItemPaged(get_next, extract_data)
+
+    @distributed_trace
+    def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _models.Snapshot:
+        """Gets a snapshot.
+
+        Gets a snapshot.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :return: Snapshot or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.Snapshot
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {  # maps HTTP status to azure-core exception types; caller may extend
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None)
+
+        _request = build_get_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False  # response body is fully buffered, not streamed
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(  # best-effort parse of the ARM error body
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("Snapshot", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @overload
+    def create_or_update(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        parameters: _models.Snapshot,  # model overload: body is serialized to JSON
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> _models.Snapshot:
+        """Creates or updates a snapshot.
+
+        Creates or updates a snapshot.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param parameters: The snapshot to create or update. Required.
+        :type parameters: ~azure.mgmt.containerservice.models.Snapshot
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: Snapshot or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.Snapshot
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_or_update(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        parameters: IO[bytes],  # raw-stream overload: body is passed through unmodified
+        *,
+        content_type: str = "application/json",
+        **kwargs: Any
+    ) -> _models.Snapshot:
+        """Creates or updates a snapshot.
+
+        Creates or updates a snapshot.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param parameters: The snapshot to create or update. Required.
+        :type parameters: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: Snapshot or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.Snapshot
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def create_or_update(
+        self,
+        resource_group_name: str,
+        resource_name: str,
+        parameters: Union[_models.Snapshot, IO[bytes]],
+        **kwargs: Any
+    ) -> _models.Snapshot:
+        """Creates or updates a snapshot.
+
+        Creates or updates a snapshot.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param parameters: The snapshot to create or update. Is either a Snapshot type or a IO[bytes]
+         type. Required.
+        :type parameters: ~azure.mgmt.containerservice.models.Snapshot or IO[bytes]
+        :return: Snapshot or the result of cls(response)
+        :rtype: ~azure.mgmt.containerservice.models.Snapshot
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {  # maps HTTP status to azure-core exception types; caller may extend
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None)
+
+        content_type = content_type or "application/json"
+        _json = None
+        _content = None
+        if isinstance(parameters, (IOBase, bytes)):
+            _content = parameters  # already-serialized body: send bytes/stream as-is
+        else:
+            _json = self._serialize.body(parameters, "Snapshot")  # serialize the model to JSON
+
+        _request = build_create_or_update_request(
+            resource_group_name=resource_group_name,
+            resource_name=resource_name,
+            subscription_id=self._config.subscription_id,
+            api_version=api_version,
+            content_type=content_type,
+            json=_json,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        _request.url = self._client.format_url(_request.url)
+
+        _stream = False  # response body is fully buffered, not streamed
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 201]:  # 200 = updated, 201 = created
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(
+                _models.ErrorResponse,
+                pipeline_response,
+            )
+            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize("Snapshot", pipeline_response.http_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+ # Typing overload only (no body): documents the form of update_tags that takes a
+ # deserialized TagsObject model, which is serialized to JSON before sending.
+ @overload
+ def update_tags(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: _models.TagsObject,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.Snapshot:
+ """Updates tags on a snapshot.
+
+ Updates tags on a snapshot.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: Parameters supplied to the Update snapshot Tags operation. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.TagsObject
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: Snapshot or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.Snapshot
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ # Typing overload only (no body): documents the form of update_tags that takes a
+ # raw binary stream (IO[bytes]) which is passed through as the request content.
+ @overload
+ def update_tags(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> _models.Snapshot:
+ """Updates tags on a snapshot.
+
+ Updates tags on a snapshot.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: Parameters supplied to the Update snapshot Tags operation. Required.
+ :type parameters: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: Snapshot or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.Snapshot
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def update_tags(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ parameters: Union[_models.TagsObject, IO[bytes]],
+ **kwargs: Any
+ ) -> _models.Snapshot:
+ """Updates tags on a snapshot.
+
+ Updates tags on a snapshot.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param parameters: Parameters supplied to the Update snapshot Tags operation. Is either a
+ TagsObject type or an IO[bytes] type. Required.
+ :type parameters: ~azure.mgmt.containerservice.models.TagsObject or IO[bytes]
+ :return: Snapshot or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.Snapshot
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ # Map selected HTTP status codes to typed azure-core exceptions; callers may
+ # extend/override the mapping via the "error_map" kwarg.
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ # Per-call api_version/content_type/cls overrides take precedence over client config.
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ # Stream/bytes bodies are sent as-is; model bodies are serialized to JSON.
+ if isinstance(parameters, (IOBase, bytes)):
+ _content = parameters
+ else:
+ _json = self._serialize.body(parameters, "TagsObject")
+
+ _request = build_update_tags_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ # PATCH of tags is synchronous: only 200 is a success status here.
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("Snapshot", pipeline_response.http_response)
+
+ # Custom response hook ("cls") may transform the result before returning.
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace
+ def delete( # pylint: disable=inconsistent-return-statements
+ self, resource_group_name: str, resource_name: str, **kwargs: Any
+ ) -> None:
+ """Deletes a snapshot.
+
+ Deletes a snapshot.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ # Map selected HTTP status codes to typed azure-core exceptions; caller-supplied
+ # "error_map" entries override these defaults.
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[None] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ # 200 = deleted, 204 = resource already absent; anything else is an error.
+ if response.status_code not in [200, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ # No body on success; only invoke the optional response hook, else implicitly return None.
+ if cls:
+ return cls(pipeline_response, None, {}) # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_trusted_access_role_bindings_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_trusted_access_role_bindings_operations.py
new file mode 100644
index 00000000000..4e7e2f450c6
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_trusted_access_role_bindings_operations.py
@@ -0,0 +1,756 @@
+# pylint: disable=line-too-long,useless-suppression
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from io import IOBase
+from typing import Any, Callable, IO, Iterator, Optional, TypeVar, Union, cast, overload
+import urllib.parse
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ StreamClosedError,
+ StreamConsumedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models as _models
+from .._configuration import ContainerServiceClientConfiguration
+from .._utils.serialization import Deserializer, Serializer
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+ def build_list_request(
+ resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
+ ) -> HttpRequest:
+ """Build the GET request that lists trusted access role bindings for a managed cluster.
+
+ URL path arguments are validated client-side (length/pattern) by the serializer.
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ # Default to the service version this module was generated for.
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/trustedAccessRoleBindings",
+ )
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "resourceName": _SERIALIZER.url(
+ "resource_name",
+ resource_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+ def build_get_request(
+ resource_group_name: str,
+ resource_name: str,
+ trusted_access_role_binding_name: str,
+ subscription_id: str,
+ **kwargs: Any
+ ) -> HttpRequest:
+ """Build the GET request for a single trusted access role binding.
+
+ URL path arguments are validated client-side (length/pattern) by the serializer.
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/trustedAccessRoleBindings/{trustedAccessRoleBindingName}",
+ )
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "resourceName": _SERIALIZER.url(
+ "resource_name",
+ resource_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+ ),
+ "trustedAccessRoleBindingName": _SERIALIZER.url(
+ "trusted_access_role_binding_name",
+ trusted_access_role_binding_name,
+ "str",
+ max_length=24,
+ min_length=1,
+ pattern=r"^([A-Za-z0-9-])+$",
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+ def build_create_or_update_request(
+ resource_group_name: str,
+ resource_name: str,
+ trusted_access_role_binding_name: str,
+ subscription_id: str,
+ **kwargs: Any
+ ) -> HttpRequest:
+ """Build the PUT request that creates or updates a trusted access role binding.
+
+ The JSON/stream body itself is attached by the caller via **kwargs (json=/content=).
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/trustedAccessRoleBindings/{trustedAccessRoleBindingName}",
+ )
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "resourceName": _SERIALIZER.url(
+ "resource_name",
+ resource_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+ ),
+ "trustedAccessRoleBindingName": _SERIALIZER.url(
+ "trusted_access_role_binding_name",
+ trusted_access_role_binding_name,
+ "str",
+ max_length=24,
+ min_length=1,
+ pattern=r"^([A-Za-z0-9-])+$",
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ # Content-Type is only set when a body content type was provided.
+ if content_type is not None:
+ _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+ def build_delete_request(
+ resource_group_name: str,
+ resource_name: str,
+ trusted_access_role_binding_name: str,
+ subscription_id: str,
+ **kwargs: Any
+ ) -> HttpRequest:
+ """Build the DELETE request for a trusted access role binding.
+
+ URL path arguments are validated client-side (length/pattern) by the serializer.
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))
+ accept = _headers.pop("Accept", "application/json")
+
+ # Construct URL
+ _url = kwargs.pop(
+ "template_url",
+ "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/trustedAccessRoleBindings/{trustedAccessRoleBindingName}",
+ )
+ path_format_arguments = {
+ "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+ "resourceGroupName": _SERIALIZER.url(
+ "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
+ ),
+ "resourceName": _SERIALIZER.url(
+ "resource_name",
+ resource_name,
+ "str",
+ max_length=63,
+ min_length=1,
+ pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
+ ),
+ "trustedAccessRoleBindingName": _SERIALIZER.url(
+ "trusted_access_role_binding_name",
+ trusted_access_role_binding_name,
+ "str",
+ max_length=24,
+ min_length=1,
+ pattern=r"^([A-Za-z0-9-])+$",
+ ),
+ }
+
+ _url: str = _url.format(**path_format_arguments) # type: ignore
+
+ # Construct parameters
+ _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+ # Construct headers
+ _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+ return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class TrustedAccessRoleBindingsOperations:
+ """
+ .. warning::
+ **DO NOT** instantiate this class directly.
+
+ Instead, you should access the following operations through
+ :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s
+ :attr:`trusted_access_role_bindings` attribute.
+ """
+
+ models = _models
+
+ def __init__(self, *args, **kwargs) -> None:
+ """Wire up the shared client, config, and (de)serializers injected by the service client.
+
+ Accepts them either positionally or as keyword arguments (client/config/serializer/deserializer).
+ """
+ input_args = list(args)
+ self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+ self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+ self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+ self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+ @distributed_trace
+ def list(
+ self, resource_group_name: str, resource_name: str, **kwargs: Any
+ ) -> ItemPaged["_models.TrustedAccessRoleBinding"]:
+ """List trusted access role bindings.
+
+ List trusted access role bindings.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :return: An iterator like instance of either TrustedAccessRoleBinding or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.TrustedAccessRoleBindingListResult] = kwargs.pop("cls", None)
+
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ # Build either the first-page request or a follow-up request from next_link.
+ def prepare_request(next_link=None):
+ if not next_link:
+
+ _request = build_list_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ else:
+ # make call to next link with the client's api-version
+ _parsed_next_link = urllib.parse.urlparse(next_link)
+ _next_request_params = case_insensitive_dict(
+ {
+ key: [urllib.parse.quote(v) for v in value]
+ for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+ }
+ )
+ _next_request_params["api-version"] = self._config.api_version
+ _request = HttpRequest(
+ "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+ )
+ _request.url = self._client.format_url(_request.url)
+ _request.method = "GET"
+ return _request
+
+ # Pull one page's items and the next-page continuation token out of a response.
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize("TrustedAccessRoleBindingListResult", pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem) # type: ignore
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ # Fetch one page (first page when next_link is None), raising on non-200.
+ def get_next(next_link=None):
+ _request = prepare_request(next_link)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ # Lazily pages through results as the caller iterates.
+ return ItemPaged(get_next, extract_data)
+
+ @distributed_trace
+ def get(
+ self, resource_group_name: str, resource_name: str, trusted_access_role_binding_name: str, **kwargs: Any
+ ) -> _models.TrustedAccessRoleBinding:
+ """Get a trusted access role binding.
+
+ Get a trusted access role binding.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param trusted_access_role_binding_name: The name of trusted access role binding. Required.
+ :type trusted_access_role_binding_name: str
+ :return: TrustedAccessRoleBinding or the result of cls(response)
+ :rtype: ~azure.mgmt.containerservice.models.TrustedAccessRoleBinding
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ # Typed exceptions for common failure statuses; overridable via "error_map" kwarg.
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[_models.TrustedAccessRoleBinding] = kwargs.pop("cls", None)
+
+ _request = build_get_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ trusted_access_role_binding_name=trusted_access_role_binding_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize("TrustedAccessRoleBinding", pipeline_response.http_response)
+
+ # Optional response hook ("cls") may transform the result before returning.
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ def _create_or_update_initial(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ trusted_access_role_binding_name: str,
+ trusted_access_role_binding: Union[_models.TrustedAccessRoleBinding, IO[bytes]],
+ **kwargs: Any
+ ) -> Iterator[bytes]:
+ """Send the initial PUT of the create-or-update LRO and return the raw streamed body.
+
+ Internal helper for :meth:`begin_create_or_update`; the poller deserializes the
+ final result, so this returns a byte-stream rather than a model.
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ content_type = content_type or "application/json"
+ _json = None
+ _content = None
+ # Stream/bytes bodies pass through unchanged; models are serialized to JSON.
+ if isinstance(trusted_access_role_binding, (IOBase, bytes)):
+ _content = trusted_access_role_binding
+ else:
+ _json = self._serialize.body(trusted_access_role_binding, "TrustedAccessRoleBinding")
+
+ _request = build_create_or_update_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ trusted_access_role_binding_name=trusted_access_role_binding_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ content_type=content_type,
+ json=_json,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ # Stream the response so the poller can consume the body lazily.
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ # Typing overload only (no body): deserialized-model form of begin_create_or_update.
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ trusted_access_role_binding_name: str,
+ trusted_access_role_binding: _models.TrustedAccessRoleBinding,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.TrustedAccessRoleBinding]:
+ """Create or update a trusted access role binding.
+
+ Create or update a trusted access role binding.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param trusted_access_role_binding_name: The name of trusted access role binding. Required.
+ :type trusted_access_role_binding_name: str
+ :param trusted_access_role_binding: A trusted access role binding. Required.
+ :type trusted_access_role_binding: ~azure.mgmt.containerservice.models.TrustedAccessRoleBinding
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either TrustedAccessRoleBinding or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ # Typing overload only (no body): raw binary-stream form of begin_create_or_update.
+ @overload
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ trusted_access_role_binding_name: str,
+ trusted_access_role_binding: IO[bytes],
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.TrustedAccessRoleBinding]:
+ """Create or update a trusted access role binding.
+
+ Create or update a trusted access role binding.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param trusted_access_role_binding_name: The name of trusted access role binding. Required.
+ :type trusted_access_role_binding_name: str
+ :param trusted_access_role_binding: A trusted access role binding. Required.
+ :type trusted_access_role_binding: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: An instance of LROPoller that returns either TrustedAccessRoleBinding or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ resource_name: str,
+ trusted_access_role_binding_name: str,
+ trusted_access_role_binding: Union[_models.TrustedAccessRoleBinding, IO[bytes]],
+ **kwargs: Any
+ ) -> LROPoller[_models.TrustedAccessRoleBinding]:
+ """Create or update a trusted access role binding.
+
+ Create or update a trusted access role binding.
+
+ :param resource_group_name: The name of the resource group. The name is case insensitive.
+ Required.
+ :type resource_group_name: str
+ :param resource_name: The name of the managed cluster resource. Required.
+ :type resource_name: str
+ :param trusted_access_role_binding_name: The name of trusted access role binding. Required.
+ :type trusted_access_role_binding_name: str
+ :param trusted_access_role_binding: A trusted access role binding. Is either a
+ TrustedAccessRoleBinding type or an IO[bytes] type. Required.
+ :type trusted_access_role_binding: ~azure.mgmt.containerservice.models.TrustedAccessRoleBinding
+ or IO[bytes]
+ :return: An instance of LROPoller that returns either TrustedAccessRoleBinding or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.models.TrustedAccessRoleBinding]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.TrustedAccessRoleBinding] = kwargs.pop("cls", None)
+ polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
+ lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+ cont_token: Optional[str] = kwargs.pop("continuation_token", None)
+ # Only issue the initial PUT when not resuming from a continuation token.
+ if cont_token is None:
+ raw_result = self._create_or_update_initial(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ trusted_access_role_binding_name=trusted_access_role_binding_name,
+ trusted_access_role_binding=trusted_access_role_binding,
+ api_version=api_version,
+ content_type=content_type,
+ cls=lambda x, y, z: x,
+ headers=_headers,
+ params=_params,
+ **kwargs
+ )
+ raw_result.http_response.read() # type: ignore
+ kwargs.pop("error_map", None)
+
+ # Deserialize the terminal LRO response into the model (or through "cls").
+ def get_long_running_output(pipeline_response):
+ deserialized = self._deserialize("TrustedAccessRoleBinding", pipeline_response.http_response)
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+ return deserialized
+
+ # polling=True -> ARM-compliant polling; False -> single-shot; else a custom method.
+ if polling is True:
+ polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
+ elif polling is False:
+ polling_method = cast(PollingMethod, NoPolling())
+ else:
+ polling_method = polling
+ if cont_token:
+ return LROPoller[_models.TrustedAccessRoleBinding].from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output,
+ )
+ return LROPoller[_models.TrustedAccessRoleBinding](
+ self._client, raw_result, get_long_running_output, polling_method # type: ignore
+ )
+
+ def _delete_initial(
+ self, resource_group_name: str, resource_name: str, trusted_access_role_binding_name: str, **kwargs: Any
+ ) -> Iterator[bytes]:
+ """Send the initial DELETE of the delete LRO and return the raw streamed body.
+
+ Internal helper for :meth:`begin_delete`; captures the Location header used
+ for polling a 202 Accepted response.
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+ api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+ cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None)
+
+ _request = build_delete_request(
+ resource_group_name=resource_group_name,
+ resource_name=resource_name,
+ trusted_access_role_binding_name=trusted_access_role_binding_name,
+ subscription_id=self._config.subscription_id,
+ api_version=api_version,
+ headers=_headers,
+ params=_params,
+ )
+ _request.url = self._client.format_url(_request.url)
+
+ _decompress = kwargs.pop("decompress", True)
+ # Stream the response so the poller can consume the body lazily.
+ _stream = True
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ # 202 = delete accepted (async), 204 = already gone; anything else is an error.
+ if response.status_code not in [202, 204]:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(
+ _models.ErrorResponse,
+ pipeline_response,
+ )
+ raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+ response_headers = {}
+ # On 202, surface the Location polling URL to the LRO machinery.
+ if response.status_code == 202:
+ response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
+
+ deserialized = response.stream_download(self._client._pipeline, decompress=_decompress)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers) # type: ignore
+
+ return deserialized # type: ignore
+
+    @distributed_trace
+    def begin_delete(
+        self, resource_group_name: str, resource_name: str, trusted_access_role_binding_name: str, **kwargs: Any
+    ) -> LROPoller[None]:
+        """Delete a trusted access role binding.
+
+        Delete a trusted access role binding.
+
+        :param resource_group_name: The name of the resource group. The name is case insensitive.
+         Required.
+        :type resource_group_name: str
+        :param resource_name: The name of the managed cluster resource. Required.
+        :type resource_name: str
+        :param trusted_access_role_binding_name: The name of trusted access role binding. Required.
+        :type trusted_access_role_binding_name: str
+        :return: An instance of LROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.LROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[None] = kwargs.pop("cls", None)
+        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)  # True=ARM polling, False=no polling, or custom
+        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
+        cont_token: Optional[str] = kwargs.pop("continuation_token", None)  # resume an in-flight LRO if provided
+        if cont_token is None:
+            raw_result = self._delete_initial(
+                resource_group_name=resource_group_name,
+                resource_name=resource_name,
+                trusted_access_role_binding_name=trusted_access_role_binding_name,
+                api_version=api_version,
+                cls=lambda x, y, z: x,  # keep the raw pipeline response for the poller
+                headers=_headers,
+                params=_params,
+                **kwargs
+            )
+            raw_result.http_response.read()  # type: ignore  # fully consume the streamed body before polling starts
+        kwargs.pop("error_map", None)  # already applied in the initial call; do not pass to the poller
+
+        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
+            if cls:
+                return cls(pipeline_response, None, {})  # type: ignore
+
+        if polling is True:
+            polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
+        elif polling is False:
+            polling_method = cast(PollingMethod, NoPolling())
+        else:
+            polling_method = polling  # caller supplied a custom PollingMethod
+        if cont_token:
+            return LROPoller[None].from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output,
+            )
+        return LROPoller[None](self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_trusted_access_roles_operations.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_trusted_access_roles_operations.py
new file mode 100644
index 00000000000..b0f3eedfd6f
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/operations/_trusted_access_roles_operations.py
@@ -0,0 +1,168 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from collections.abc import MutableMapping
+from typing import Any, Callable, Optional, TypeVar
+import urllib.parse
+
+from azure.core import PipelineClient
+from azure.core.exceptions import (
+ ClientAuthenticationError,
+ HttpResponseError,
+ ResourceExistsError,
+ ResourceNotFoundError,
+ ResourceNotModifiedError,
+ map_error,
+)
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.rest import HttpRequest, HttpResponse
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.utils import case_insensitive_dict
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from .. import models as _models
+from .._configuration import ContainerServiceClientConfiguration
+from .._utils.serialization import Deserializer, Serializer
+
+T = TypeVar("T")
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, dict[str, Any]], Any]]
+List = list
+
+_SERIALIZER = Serializer()
+_SERIALIZER.client_side_validation = False
+
+
+def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> HttpRequest:  # builds the GET request for listing trusted access roles in a region
+    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-10-01"))  # default pinned to this SDK's API version
+    accept = _headers.pop("Accept", "application/json")
+
+    # Construct URL
+    _url = kwargs.pop(
+        "template_url",
+        "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/trustedAccessRoles",
+    )
+    path_format_arguments = {
+        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
+        "location": _SERIALIZER.url("location", location, "str", min_length=1),  # ARM requires a non-empty region name
+    }
+
+    _url: str = _url.format(**path_format_arguments)  # type: ignore
+
+    # Construct parameters
+    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
+
+    # Construct headers
+    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
+
+    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
+
+
+class TrustedAccessRolesOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+        Instead, you should access the following operations through
+        :class:`~azure.mgmt.containerservice.ContainerServiceClient`'s
+        :attr:`trusted_access_roles` attribute.
+    """
+
+    models = _models
+
+    def __init__(self, *args, **kwargs) -> None:  # collaborators are injected positionally by the generated client
+        input_args = list(args)
+        self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client")
+        self._config: ContainerServiceClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config")
+        self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer")
+        self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer")
+
+    @distributed_trace
+    def list(self, location: str, **kwargs: Any) -> ItemPaged["_models.TrustedAccessRole"]:
+        """List supported trusted access roles.
+
+        List supported trusted access roles.
+
+        :param location: The name of the Azure region. Required.
+        :type location: str
+        :return: An iterator like instance of either TrustedAccessRole or the result of cls(response)
+        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.models.TrustedAccessRole]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
+        cls: ClsType[_models.TrustedAccessRoleListResult] = kwargs.pop("cls", None)  # optional custom page transformer
+
+        error_map: MutableMapping = {  # map well-known HTTP status codes to azure-core exception types
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})  # caller-supplied overrides take precedence
+
+        def prepare_request(next_link=None):  # builds the first-page or next-page request
+            if not next_link:
+
+                _request = build_list_request(
+                    location=location,
+                    subscription_id=self._config.subscription_id,
+                    api_version=api_version,
+                    headers=_headers,
+                    params=_params,
+                )
+                _request.url = self._client.format_url(_request.url)
+
+            else:
+                # make call to next link with the client's api-version
+                _parsed_next_link = urllib.parse.urlparse(next_link)
+                _next_request_params = case_insensitive_dict(
+                    {
+                        key: [urllib.parse.quote(v) for v in value]  # re-quote query values from the raw next link
+                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
+                    }
+                )
+                _next_request_params["api-version"] = self._config.api_version  # force the client's api-version on next pages
+                _request = HttpRequest(
+                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
+                )
+                _request.url = self._client.format_url(_request.url)
+                _request.method = "GET"  # redundant: the request above is already constructed with "GET" (autorest artifact)
+            return _request
+
+        def extract_data(pipeline_response):  # pulls (next_link, items) out of one page response
+            deserialized = self._deserialize("TrustedAccessRoleListResult", pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)  # type: ignore
+            return deserialized.next_link or None, iter(list_of_elem)
+
+        def get_next(next_link=None):  # fetches one page, raising on non-200 responses
+            _request = prepare_request(next_link)
+
+            _stream = False
+            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+                _request, stream=_stream, **kwargs
+            )
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                error = self._deserialize.failsafe_deserialize(
+                    _models.ErrorResponse,
+                    pipeline_response,
+                )
+                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        return ItemPaged(get_next, extract_data)
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/py.typed b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/py.typed
new file mode 100644
index 00000000000..e5aff4f83af
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/2025_10_01/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561.
\ No newline at end of file
diff --git a/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/__init__.py b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/__init__.py
new file mode 100644
index 00000000000..34913fb394d
--- /dev/null
+++ b/src/aks-sreclaw/azext_aks_sreclaw/vendored_sdks/azure_mgmt_containerservice/__init__.py
@@ -0,0 +1,4 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
diff --git a/src/aks-sreclaw/conftest.py b/src/aks-sreclaw/conftest.py
new file mode 100644
index 00000000000..dc6783502b3
--- /dev/null
+++ b/src/aks-sreclaw/conftest.py
@@ -0,0 +1,26 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+from __future__ import annotations
+
+import pytest
+
+
+def pytest_addoption(parser: pytest.Parser) -> None:  # pytest hook: register eval-harness CLI flags
+    group = parser.getgroup("aks-agent-evals")  # NOTE(review): group/help say "AKS Agent" but this extension is aks-sreclaw — confirm intended reuse of the aks-agent eval harness
+    group.addoption(
+        "--skip-setup",
+        action="store_true",
+        dest="aks_skip_setup",  # read via config.option.aks_skip_setup by the eval tests
+        default=False,
+        help="Skip executing before_test commands in AKS Agent evals.",
+    )
+    group.addoption(
+        "--skip-cleanup",
+        action="store_true",
+        dest="aks_skip_cleanup",  # read via config.option.aks_skip_cleanup by the eval tests
+        default=False,
+        help="Skip executing after_test commands in AKS Agent evals.",
+    )
diff --git a/src/aks-sreclaw/setup.cfg b/src/aks-sreclaw/setup.cfg
new file mode 100644
index 00000000000..3c6e79cf31d
--- /dev/null
+++ b/src/aks-sreclaw/setup.cfg
@@ -0,0 +1,2 @@
+[bdist_wheel]
+universal=1
diff --git a/src/aks-sreclaw/setup.py b/src/aks-sreclaw/setup.py
new file mode 100644
index 00000000000..1f60e6f7d0d
--- /dev/null
+++ b/src/aks-sreclaw/setup.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+from codecs import open as open1
+
+from setuptools import find_packages, setup
+
+VERSION = "1.0.0b1"
+
+CLASSIFIERS = [
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ "Intended Audience :: System Administrators",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "License :: OSI Approved :: MIT License",
+]
+
+DEPENDENCIES = [
+ "rich==13.9.4",
+ "kubernetes==24.2.0",
+ "openai>=1.0.0",
+]
+
+with open1("README.rst", "r", encoding="utf-8") as f:
+ README = f.read()
+with open1("HISTORY.rst", "r", encoding="utf-8") as f:
+ HISTORY = f.read()
+
+setup(
+ name="aks-sreclaw",
+ version=VERSION,
+ description="AKS SREClaw is an openclaw-base AI troubleshoot assistant tailored for Azure Kubernetes Service",
+ long_description=README + "\n\n" + HISTORY,
+ license="MIT",
+ author="Microsoft Corporation",
+ author_email="azpycli@microsoft.com",
+ url="https://github.com/Azure/azure-cli-extensions/tree/main/src/aks-sreclaw",
+ classifiers=CLASSIFIERS,
+ packages=find_packages(exclude=["*.tests", "*.tests.*"]),
+ package_data={"azext_aks_sreclaw": ["azext_metadata.json"]},
+ install_requires=DEPENDENCIES,
+)